diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3cef9b34fa..449ff5cbbd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -259,7 +259,7 @@ set(_BUILD_DIRS
acb_theta dirichlet bernoulli hypgeom
gr gr_generic gr_vec gr_mat
- gr_poly gr_mpoly gr_special
+ gr_poly gr_mpoly gr_sparse_vec gr_sparse_mat gr_special
calcium
fmpz_mpoly_q
diff --git a/Makefile.in b/Makefile.in
index c948a21458..81b08c0b84 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -201,7 +201,8 @@ HEADER_DIRS := \
acb_theta dirichlet bernoulli hypgeom \
\
gr gr_generic gr_vec gr_mat \
- gr_poly gr_mpoly gr_special \
+ gr_poly gr_mpoly gr_sparse_vec gr_sparse_mat \
+ gr_special \
\
calcium \
fmpz_mpoly_q \
diff --git a/doc/source/gr_mat.rst b/doc/source/gr_mat.rst
index 9a2ac83670..3759be1800 100644
--- a/doc/source/gr_mat.rst
+++ b/doc/source/gr_mat.rst
@@ -199,20 +199,26 @@ Basic row, column and entry operations
``c`` is the number of columns of ``mat``. If ``perm`` is non-``NULL``, the
permutation of the columns will also be applied to ``perm``.
-.. function:: truth_t gr_mat_is_empty(const gr_mat_t mat, gr_ctx_t ctx)
+.. macro:: gr_mat_is_empty(mat, ctx)
Returns whether *mat* is an empty matrix, having either zero
rows or zero columns. This predicate is always decidable (even if
the underlying ring is not computable), returning
``T_TRUE`` or ``T_FALSE``.
-.. function:: truth_t gr_mat_is_square(const gr_mat_t mat, gr_ctx_t ctx)
+.. macro:: gr_mat_is_square(mat, ctx)
Returns whether *mat* is a square matrix, having the same number
of rows as columns (not the same thing as being a perfect square!).
This predicate is always decidable (even if the underlying ring
is not computable), returning ``T_TRUE`` or ``T_FALSE``.
+.. macro:: gr_mat_is_compatible(mat1, mat2, ctx)
+
+ Returns whether *mat1* and *mat2* have the same dimensions.
+ This predicate is always decidable (even if the underlying ring
+ is not computable), returning ``T_TRUE`` or ``T_FALSE``.
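+
+    For example, a generic routine might use this as a shape guard (a sketch;
+    ``GR_DOMAIN`` is the standard error flag for dimension mismatches)::
+
+        if (gr_mat_is_compatible(mat1, mat2, ctx) == T_FALSE)
+            return GR_DOMAIN;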
+
Arithmetic
-------------------------------------------------------------------------------
diff --git a/doc/source/gr_sparse_mat.rst b/doc/source/gr_sparse_mat.rst
new file mode 100644
index 0000000000..b5c95e80d2
--- /dev/null
+++ b/doc/source/gr_sparse_mat.rst
@@ -0,0 +1,550 @@
+.. _gr-sparse-mat:
+
+**gr_sparse_mat.h** -- sparse matrices over generic rings
+===============================================================================
+
+The types :type:`gr_csr_mat_t`, :type:`gr_lil_mat_t` and :type:`gr_coo_mat_t` are
+representations of matrices over a generic
+ring which are optimized for the situation where there are many entries which
+are zero (the matrix is *sparse*, or has *low density*). Internally, the
+matrix records only the positions of the nonzeros and their values.
+Although technically these types and all their functions will operate correctly
+at any density, a regular :type:`gr_mat_t` may be more efficient if the number
+of nonzeros is not small.
+
+Types and basic access
+--------------------------------------------------------------------------------
+
+.. type:: gr_csr_mat_struct
+
+.. type:: gr_csr_mat_t
+
+ This struct represents a sparse matrix in "compressed sparse row" (CSR) form, and contains:
+
+ * An `slong` value ``r``, the number of rows in the matrix.
+ * An `slong` value ``c``, the number of columns in the matrix.
+ * An `slong` value ``nnz``, the number of nonzeroes in the matrix.
+ * An `slong` value ``alloc``, the maximum number of nonzeroes currently allocated.
+ * A `ulong` array ``rows`` of length ``r+1`` providing row offsets.
+ * A `ulong` array ``cols`` of length ``nnz`` providing column indices.
+ * A :type:`gr_ptr` array ``nzs`` of length ``nnz`` providing nonzero values.
+
+ For a given row of index ``row``, its nonzero columns and associated values are given
+ by the components of ``cols`` and ``nzs`` associated to
+ indices between ``rows[row]`` and ``rows[row+1]``.
+ We always require:
+
+ * The ``cols`` for each row are sorted into strictly increasing order.
+ * The ``nzs`` are nonzero (meaning, ``gr_is_zero(GR_ENTRY(mat->nzs, i, ctx->sizeof_elem), ctx)`` returns ``T_FALSE`` for ``0 <= i < mat->nnz``).
+ * We have ``nnz <= alloc``.
+
+ A ``gr_csr_mat_t`` is defined as an array of length one of type
+ ``gr_csr_mat_struct``, permitting a ``gr_csr_mat_t`` to
+ be passed by reference. Note that ``gr_csr_mat_t`` is the more efficient
+ way to represent a sparse matrix, but is less convenient for modification.
+
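+ As an illustration (not part of the interface), looping over all stored
+ entries of a ``gr_csr_mat_t`` uses the ``rows`` offsets directly::
+
+     slong row, k;
+     slong sz = ctx->sizeof_elem;
+
+     for (row = 0; row < mat->r; row++)
+         for (k = mat->rows[row]; k < mat->rows[row + 1]; k++)
+         {
+             /* column index of this nonzero: mat->cols[k] */
+             /* value of this nonzero: GR_ENTRY(mat->nzs, k, sz) */
+         }
+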
+.. type:: gr_lil_mat_struct
+
+.. type:: gr_lil_mat_t
+
+ This struct represents a sparse matrix in "list of lists" (LIL) form, and contains:
+
+ * An `slong` value ``r``, the number of rows in the matrix.
+ * An `slong` value ``c``, the number of columns in the matrix.
+ * An `slong` value ``nnz``, the number of nonzeroes in the matrix.
+ * A :type:`gr_sparse_vec_t` array ``rows`` of length ``r``, the (sparse) rows of the matrix.
+
+ A ``gr_lil_mat_t`` is defined as an array of length one of type
+ ``gr_lil_mat_struct``, permitting a ``gr_lil_mat_t`` to
+ be passed by reference. Note that ``gr_lil_mat_t`` is a less efficient
+ way to represent a sparse matrix, but is more convenient for modification.
+
+.. type:: gr_coo_mat_struct
+
+.. type:: gr_coo_mat_t
+
+ This struct represents a sparse matrix in "coordinate" (COO) form, and contains:
+
+ * An `slong` value ``r``, the number of rows in the matrix.
+ * An `slong` value ``c``, the number of columns in the matrix.
+ * An `slong` value ``nnz``, the number of nonzeroes in the matrix.
+ * An `slong` value ``alloc``, the maximum number of nonzeroes currently allocated.
+ * A `ulong` array ``rows`` of length ``nnz``, the row index of each entry.
+ * A `ulong` array ``cols`` of length ``nnz``, the column index of each entry.
+ * A :type:`gr_ptr` array ``nzs`` of length ``nnz``, the value of each entry.
+ * A `truth_t` value ``is_canonical``, which is ``T_TRUE`` if the matrix is in "canonical" form (sorted by row and then column, with unique positions and nonzero entries).
+
+ A ``gr_coo_mat_t`` is defined as an array of length one of type
+ ``gr_coo_mat_struct``, permitting a ``gr_coo_mat_t`` to
+ be passed by reference. Note that ``gr_coo_mat_t`` is the least efficient
+ way to represent a sparse matrix (and has minimal functionality), but
+ is convenient for constructing from arbitrary lists of entries.
+
+.. function:: slong gr_sparse_mat_nrows(gr_csr_mat_t mat, gr_ctx_t ctx)
+ slong gr_sparse_mat_ncols(gr_csr_mat_t mat, gr_ctx_t ctx)
+ slong gr_sparse_mat_nnz(gr_csr_mat_t mat, gr_ctx_t ctx)
+
+ Get the number of rows/columns/nonzeroes of the given sparse matrix,
+ in any representation.
+
+.. macro:: GR_CSR_MAT_COL(mat, i, j)
+ GR_LIL_MAT_COL(mat, i, j)
+
+ Get a pointer to the column of the *j*-th nonzero in the *i*-th row of *mat*.
+ There is no bounds checking.
+
+.. macro:: GR_CSR_MAT_ENTRY(mat, i, j, sz)
+ GR_LIL_MAT_ENTRY(mat, i, j, sz)
+
+ Get a pointer to the value of the *j*-th nonzero entry in the *i*-th row of *mat*,
+ given the size of each entry (typically obtained as ``ctx->sizeof_elem``).
+ There is no bounds checking.
+
+.. macro:: GR_COO_MAT_ROW(mat, i)
+ GR_COO_MAT_COL(mat, i)
+ GR_COO_MAT_ENTRY(mat, i, sz)
+
+ Get a pointer to the row, column, and value of the *i*-th nonzero in the matrix, respectively.
+ There is no bounds checking.
+
+.. function:: ulong * gr_csr_mat_col_ptr(gr_csr_mat_t mat, slong i, slong j)
+ const ulong * gr_csr_mat_col_srcptr(const gr_csr_mat_t mat, slong i, slong j)
+ ulong * gr_lil_mat_col_ptr(gr_lil_mat_t mat, slong i, slong j)
+ const ulong * gr_lil_mat_col_srcptr(const gr_lil_mat_t mat, slong i, slong j)
+
+ Get a (const) pointer to the column of the *j*-th nonzero in the *i*-th row of *mat*.
+ If the location is out of bounds, the function returns NULL.
+
+.. function:: gr_ptr gr_csr_mat_entry_ptr(gr_csr_mat_t mat, slong i, slong j, gr_ctx_t ctx)
+ gr_srcptr gr_csr_mat_entry_srcptr(gr_csr_mat_t mat, slong i, slong j, gr_ctx_t ctx)
+ gr_ptr gr_lil_mat_entry_ptr(gr_lil_mat_t mat, slong i, slong j, gr_ctx_t ctx)
+ gr_srcptr gr_lil_mat_entry_srcptr(gr_lil_mat_t mat, slong i, slong j, gr_ctx_t ctx)
+
+ Get a (const) pointer to the *j*-th nonzero entry in the *i*-th row of *mat*.
+ If the location is out of bounds, the function returns NULL.
+
+.. function:: ulong * gr_coo_mat_row_ptr(gr_coo_mat_t mat, slong i)
+ const ulong * gr_coo_mat_row_srcptr(const gr_coo_mat_t mat, slong i)
+ ulong * gr_coo_mat_col_ptr(gr_coo_mat_t mat, slong i)
+ const ulong * gr_coo_mat_col_srcptr(const gr_coo_mat_t mat, slong i)
+ gr_ptr gr_coo_mat_entry_ptr(gr_coo_mat_t mat, slong i)
+ gr_srcptr gr_coo_mat_entry_srcptr(const gr_coo_mat_t mat, slong i)
+
+ Get a (const) pointer to the row, column, or value of the *i*-th nonzero in *mat*.
+ If the location is out of bounds, the function returns NULL.
+
+.. function:: void gr_csr_mat_init(gr_csr_mat_t mat, slong rows, slong cols, gr_ctx_t ctx)
+ void gr_lil_mat_init(gr_lil_mat_t mat, slong rows, slong cols, gr_ctx_t ctx)
+ void gr_coo_mat_init(gr_coo_mat_t mat, slong rows, slong cols, gr_ctx_t ctx)
+
+ Initializes *mat* to a *rows* x *cols* matrix with no nonzeros.
+
+.. function:: void gr_csr_mat_clear(gr_csr_mat_t mat, gr_ctx_t ctx)
+ void gr_lil_mat_clear(gr_lil_mat_t mat, gr_ctx_t ctx)
+ void gr_coo_mat_clear(gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Clears the matrix *mat*.
+
+.. function:: void gr_csr_mat_swap(gr_csr_mat_t mat1, gr_csr_mat_t mat2, gr_ctx_t ctx)
+ void gr_lil_mat_swap(gr_lil_mat_t mat1, gr_lil_mat_t mat2, gr_ctx_t ctx)
+ void gr_coo_mat_swap(gr_coo_mat_t mat1, gr_coo_mat_t mat2, gr_ctx_t ctx)
+
+ Swap the data underlying two sparse matrices (no allocation or copying is performed).
+
+.. function:: void gr_csr_mat_fit_nnz(gr_csr_mat_t mat, slong nnz, gr_ctx_t ctx)
+ void gr_lil_mat_fit_nnz(gr_lil_mat_t mat, slong *nnz, gr_ctx_t ctx)
+ void gr_coo_mat_fit_nnz(gr_coo_mat_t mat, slong nnz, gr_ctx_t ctx)
+
+ Ensure that *mat* has enough storage to hold at least *nnz* nonzeros. This does
+ not change the dimensions of the matrix or the number of nonzeros stored.
+ Note that, for matrices in *lil* form, *nnz* is a vector of sizes, one for each row.
+
+.. function:: void gr_csr_mat_shrink_to_nnz(gr_csr_mat_t mat, gr_ctx_t ctx)
+ void gr_lil_mat_shrink_to_nnz(gr_lil_mat_t mat, gr_ctx_t ctx)
+ void gr_coo_mat_shrink_to_nnz(gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Reallocate the storage in *mat* down to the current number of nonzeros.
+
+ Note that, for matrices in *lil* form, the operation is performed
+ on each row.
+
+.. function:: void gr_csr_mat_set_cols(gr_csr_mat_t mat, slong cols, gr_ctx_t ctx)
+ void gr_lil_mat_set_cols(gr_lil_mat_t mat, slong cols, gr_ctx_t ctx)
+ void gr_coo_mat_set_cols(gr_coo_mat_t mat, slong cols, gr_ctx_t ctx)
+
+ Set the nominal number of columns of the matrix *mat* to *cols*. If *cols* is smaller than
+ the current number of columns of *mat*, any entries whose columns are at least *cols*
+ are truncated. That is, the number of nonzeros can change (but the allocation does not).
+
+.. function:: int gr_coo_mat_from_entries(gr_coo_mat_t mat, ulong *rows, ulong *cols, gr_srcptr entries, slong nnz, truth_t is_canonical, gr_ctx_t ctx)
+
+ Construct a sparse matrix in coordinate form from three lists of corresponding
+ rows, columns, and (presumably nonzero) values. If *is_canonical* is set to *T_TRUE*,
+ the rows and columns are assumed to be sorted in row-column order and unique, and the
+ values are assumed to be nonzero. Otherwise, they may be in any order, zeros are allowed,
+ and values at duplicate positions are added together.
+
+.. function:: truth_t gr_coo_mat_is_canonical(gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Check if a matrix in coordinate form is in canonical form, with sorted and unique
+ (row, column) pairs and no (known) zero entries.
+
+.. function:: int gr_coo_mat_canonicalize(gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Put a coordinate-form sparse matrix in canonical form.
+
+.. function:: int gr_coo_mat_randtest(gr_coo_mat_t mat, slong nnz, int replacement, truth_t is_canonical, flint_rand_t state, gr_ctx_t ctx)
+
+ Construct a random coordinate-form sparse matrix *mat* with (approximately) *nnz* nonzeros,
+ in canonical form iff *is_canonical* is set to *T_TRUE*. If *replacement* is set, the
+ (row, column) indices are chosen randomly with replacement, so the actual number of nonzeros
+ may be slightly smaller. Otherwise, the matrix is guaranteed to have *nnz* identically distributed
+ nonzeros, using reservoir sampling. The latter is better for sampling high density matrices,
+ the former for low density ones. In both cases, the nonzero values are chosen using ``gr_randtest_nonzero``.
+
+.. function:: int gr_coo_mat_randtest_prob(gr_coo_mat_t mat, double prob, flint_rand_t state, gr_ctx_t ctx)
+
+ Construct a random coordinate-form sparse matrix *mat*, with each entry set to a random nonzero
+ value with probability *prob*.
+
+Getting, setting and conversion
+--------------------------------------------------------------------------------
+
+.. function:: gr_ptr gr_csr_mat_find_entry(gr_csr_mat_t mat, slong row, slong col, gr_ctx_t ctx)
+ gr_ptr gr_lil_mat_find_entry(gr_lil_mat_t mat, slong row, slong col, gr_ctx_t ctx)
+ gr_ptr gr_coo_mat_find_entry(gr_coo_mat_t mat, slong row, slong col, gr_ctx_t ctx)
+ int gr_csr_mat_get_entry(gr_ptr res, gr_csr_mat_t mat, slong row, slong col, gr_ctx_t ctx)
+ int gr_lil_mat_get_entry(gr_ptr res, gr_lil_mat_t mat, slong row, slong col, gr_ctx_t ctx)
+ int gr_coo_mat_get_entry(gr_ptr res, gr_coo_mat_t mat, slong row, slong col, gr_ctx_t ctx)
+
+ The *find* functions look for an entry at position (*row*, *col*), returning it if found and NULL
+ otherwise; the corresponding *get* functions instead copy any value found into *res*, and set
+ *res* to zero if that position is not found. In the case of non-canonical form COO
+ sparse matrices, the value is taken to be the first nonzero found at that position.
+
+ Because of the way sparse matrices are represented, this lookup is:
+
+ * logarithmic time in the number of nonzeros in the specified row, for CSR and LIL matrices,
+ * logarithmic time in the number of nonzeros in the matrix, for canonical form COO matrices, and
+ * linear time in the number of nonzeros in the matrix, for non-canonical form COO matrices.
+
+.. function:: int gr_csr_mat_set_entry(gr_csr_mat_t mat, slong row, slong col, gr_srcptr entry, gr_ctx_t ctx)
+ int gr_lil_mat_set_entry(gr_lil_mat_t mat, slong row, slong col, gr_srcptr entry, gr_ctx_t ctx)
+ int gr_coo_mat_set_entry(gr_coo_mat_t mat, slong row, slong col, gr_srcptr entry, gr_ctx_t ctx)
+
+ Set the entry at location (*row*, *col*) to be *entry*.
+
+ Because of the way sparse matrices are represented, it is not efficient to call this function
+ repeatedly: for the list of lists representation, it is linear time in the number of nonzeros
+ in the associated row; for the compressed sparse row representation, it is linear time in the
+ overall number of nonzeros. If possible, the entries to update should be batched up and
+ given using ``gr_coo_mat_from_entries``.
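+
+ A sketch of the batched pattern over the integers (``gr_ctx_init_fmpz`` gives
+ the standard integer context; note that small values may be written directly
+ as ``fmpz``, since small integers are stored inline)::
+
+     gr_ctx_t ctx;
+     gr_coo_mat_t A;
+     gr_csr_mat_t B;
+     ulong rows[3] = { 0, 1, 2 };
+     ulong cols[3] = { 2, 0, 1 };
+     fmpz vals[3] = { 5, -3, 7 };   /* small fmpz values, stored inline */
+     int status = GR_SUCCESS;
+
+     gr_ctx_init_fmpz(ctx);
+     gr_coo_mat_init(A, 3, 3, ctx);
+     gr_csr_mat_init(B, 3, 3, ctx);
+     /* entries are sorted, unique and nonzero, so T_TRUE is valid here */
+     status |= gr_coo_mat_from_entries(A, rows, cols, vals, 3, T_TRUE, ctx);
+     status |= gr_csr_mat_set_coo_mat(B, A, ctx);
+     gr_coo_mat_clear(A, ctx);
+     gr_csr_mat_clear(B, ctx);
+     gr_ctx_clear(ctx);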
+
+.. function:: int gr_csr_mat_zero(gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_zero(gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_coo_mat_zero(gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Set *mat* to the zero matrix (by setting *nnz* to 0; no actual element values are changed).
+
+.. function:: int gr_csr_mat_set(gr_csr_mat_t res, const gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_set(gr_lil_mat_t res, const gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_coo_mat_set(gr_coo_mat_t res, const gr_coo_mat_t mat, gr_ctx_t ctx)
+ int gr_csr_mat_set_lil_mat(gr_csr_mat_t res, const gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_set_csr_mat(gr_lil_mat_t res, const gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_coo_mat_set_lil_mat(gr_coo_mat_t res, const gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_coo_mat_set_csr_mat(gr_coo_mat_t res, const gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_csr_mat_set_coo_mat(gr_csr_mat_t res, const gr_coo_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_set_coo_mat(gr_lil_mat_t res, const gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Set *res* to a copy of *mat*, possibly changing the type of representation.
+
+.. function:: int gr_csr_mat_set_mat(gr_csr_mat_t res, gr_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_set_mat(gr_lil_mat_t res, gr_mat_t mat, gr_ctx_t ctx)
+ int gr_coo_mat_set_mat(gr_coo_mat_t res, gr_mat_t mat, gr_ctx_t ctx)
+
+ Set *res* from the (nominally) dense matrix *mat*.
+
+.. function:: int gr_mat_set_csr_mat(gr_mat_t res, const gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_mat_set_lil_mat(gr_mat_t res, const gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_mat_set_coo_mat(gr_mat_t res, const gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Set the dense matrix *res* from the sparse matrix *mat*, in any of the three representations.
+
+.. function:: int gr_csr_mat_init_set(gr_csr_mat_t res, const gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_init_set(gr_lil_mat_t res, const gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_csr_mat_init_set_lil_mat(gr_csr_mat_t res, const gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_init_set_csr_mat(gr_lil_mat_t res, const gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_coo_mat_init_set_lil_mat(gr_coo_mat_t res, const gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_coo_mat_init_set_csr_mat(gr_coo_mat_t res, const gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_csr_mat_init_set_coo_mat(gr_csr_mat_t res, const gr_coo_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_init_set_coo_mat(gr_lil_mat_t res, const gr_coo_mat_t mat, gr_ctx_t ctx)
+ int gr_csr_mat_init_set_mat(gr_csr_mat_t res, const gr_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_init_set_mat(gr_lil_mat_t res, const gr_mat_t mat, gr_ctx_t ctx)
+ int gr_coo_mat_init_set_mat(gr_coo_mat_t res, const gr_mat_t mat, gr_ctx_t ctx)
+ int gr_mat_init_set_csr_mat(gr_mat_t res, const gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_mat_init_set_lil_mat(gr_mat_t res, const gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_mat_init_set_coo_mat(gr_mat_t res, const gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Simultaneous initialization and setting.
+
+.. function:: int gr_lil_mat_update(gr_lil_mat_t res, const gr_lil_mat_t src, gr_ctx_t ctx)
+
+ Update *res* with the nonzeros in *src*. That is, any (row, col) indices in *res* which also appear
+ in *src* are overwritten with their values in *src*. Any indices in *res* which do
+ not appear in *src* are left unchanged.
+
+.. function:: void gr_lil_mat_window_init(gr_lil_mat_t window, const gr_lil_mat_t mat, slong r1, slong c1, slong r2, slong c2, gr_ctx_t ctx)
+ void gr_lil_mat_window_clear(gr_lil_mat_t window, gr_ctx_t ctx)
+
+ A window is a view on a submatrix with a given interval of rows and columns, and is provided
+ by using pointer offsets into the given matrix (so no copying is performed). The window
+ produced is read-only. TO BE IMPLEMENTED.
+
+.. function:: int gr_csr_mat_permute_cols(gr_csr_mat_t mat, slong * perm, gr_ctx_t ctx)
+ int gr_lil_mat_permute_cols(gr_lil_mat_t mat, slong * perm, gr_ctx_t ctx)
+ int gr_coo_mat_permute_cols(gr_coo_mat_t mat, slong * perm, gr_ctx_t ctx)
+
+ Permute the columns in *mat* according to the given permutation, i.e., ``mat[r][perm[i]] = mat[r][i]``.
+
+.. function:: int gr_lil_mat_swap_rows(gr_lil_mat_t mat, slong * perm, slong r, slong s, gr_ctx_t ctx)
+ int gr_lil_mat_permute_rows(gr_lil_mat_t mat, const slong * perm, gr_ctx_t ctx)
+ int gr_lil_mat_invert_rows(gr_lil_mat_t mat, slong * perm, gr_ctx_t ctx)
+ int gr_csr_mat_invert_rows(gr_csr_mat_t mat, slong * perm, gr_ctx_t ctx)
+
+ Swap two rows in the matrix *mat*, permute the rows according to the given permutation,
+ or invert (i.e., reverse) the order of the rows. Note that the permutation *perm* is an input to the row
+ permutation function (required to be non-``NULL``), while for the other functions it may be optionally provided
+ to keep track of the permutation(s) performed.
+
+ Permuting the rows is TO BE IMPLEMENTED.
+
+ Because of the nature of the sparse compressed row representation, swapping
+ and permuting rows is an expensive operation and thus not provided.
+
+Comparison
+--------------------------------------------------------------------------------
+
+.. function:: truth_t gr_csr_mat_is_zero(const gr_csr_mat_t mat, gr_ctx_t ctx)
+ truth_t gr_lil_mat_is_zero(const gr_lil_mat_t mat, gr_ctx_t ctx)
+ truth_t gr_coo_mat_is_zero(gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Return ``T_TRUE`` if *mat* has no nonzeros, ``T_FALSE`` if it has any element
+ known to be nonzero, and ``T_UNKNOWN`` otherwise. For a coordinate-form matrix,
+ if it is not in canonical form, it is canonicalized before returning.
+
+.. function:: truth_t gr_csr_mat_is_one(const gr_csr_mat_t mat, gr_ctx_t ctx)
+ truth_t gr_lil_mat_is_one(const gr_lil_mat_t mat, gr_ctx_t ctx)
+ truth_t gr_coo_mat_is_one(const gr_coo_mat_t mat, gr_ctx_t ctx)
+ truth_t gr_csr_mat_is_neg_one(const gr_csr_mat_t mat, gr_ctx_t ctx)
+ truth_t gr_lil_mat_is_neg_one(const gr_lil_mat_t mat, gr_ctx_t ctx)
+ truth_t gr_coo_mat_is_neg_one(const gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Return ``T_TRUE`` if *mat* is square, has exactly one nonzero entry in each row (at the
+ corresponding column), and that entry is known to be equal to (negative) one; ``T_FALSE`` if
+ the matrix is not square, has any off-diagonal entry known to be nonzero, or has
+ any diagonal entry known to not be (negative) one; and ``T_UNKNOWN`` otherwise.
+
+ Functions for coordinate-form matrices are TO BE IMPLEMENTED.
+
+.. function:: truth_t gr_csr_mat_equal(const gr_csr_mat_t mat1, const gr_csr_mat_t mat2, gr_ctx_t ctx)
+ truth_t gr_lil_mat_equal(const gr_lil_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx)
+ truth_t gr_coo_mat_equal(const gr_coo_mat_t mat1, const gr_coo_mat_t mat2, gr_ctx_t ctx)
+ truth_t gr_csr_mat_equal_lil_mat(const gr_csr_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx)
+
+ Returns ``T_TRUE`` if *mat1* and *mat2* represent the same matrix and ``T_FALSE`` otherwise.
+
+
+Output
+--------------------------------------------------------------------------------
+
+.. function:: int gr_csr_mat_write_nz(gr_stream_t out, const gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_write_nz(gr_stream_t out, const gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_coo_mat_write_nz(gr_stream_t out, const gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Write the nonzeros of *mat* to the stream *out*. Use ``gr_mat_write`` together with
+ ``gr_mat_set_csr_mat``, ``gr_mat_set_lil_mat``, or ``gr_mat_set_coo_mat``
+ if it is desired to write out the entire matrix, zeros and all.
+
+.. function:: int gr_csr_mat_print_nz(const gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_print_nz(const gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_coo_mat_print_nz(const gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Write the nonzeros of *mat* to *stdout*. Use ``gr_mat_print`` together with
+ ``gr_mat_set_csr_mat``, ``gr_mat_set_lil_mat``, or ``gr_mat_set_coo_mat``
+ if it is desired to print out the entire matrix, zeros and all.
+
+
+Arithmetic
+--------------------------------------------------------------------------------
+
+.. function:: int gr_csr_mat_neg(gr_csr_mat_t res, const gr_csr_mat_t src, gr_ctx_t ctx)
+ int gr_lil_mat_neg(gr_lil_mat_t res, const gr_lil_mat_t src, gr_ctx_t ctx)
+ int gr_coo_mat_neg(gr_coo_mat_t res, const gr_coo_mat_t src, gr_ctx_t ctx)
+
+ Set *res* to -*src*.
+
+.. function:: int gr_lil_mat_add(gr_lil_mat_t res, const gr_lil_mat_t src1, const gr_lil_mat_t src2, gr_ctx_t ctx)
+ int gr_lil_mat_sub(gr_lil_mat_t res, const gr_lil_mat_t src1, const gr_lil_mat_t src2, gr_ctx_t ctx)
+ int gr_lil_mat_mul(gr_lil_mat_t res, const gr_lil_mat_t src1, const gr_lil_mat_t src2, gr_ctx_t ctx)
+
+ Componentwise operations. (We do not provide analogous division or exponentiation
+ routines, since with sparse inputs the results of these operations would be undefined or
+ fully dense.)
+
+.. function:: int gr_lil_mat_addmul_scalar(gr_lil_mat_t res, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_lil_mat_submul_scalar(gr_lil_mat_t res, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+
+ Componentwise fused operations: set *res* to *res* plus (respectively minus) *src* multiplied by the scalar *c*, with variants for different scalar types.
+
+
+
+Componentwise multiplication and division
+--------------------------------------------------------------------------------
+
+.. function:: int gr_csr_mat_mul_scalar(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_csr_mat_mul_scalar_si(gr_csr_mat_t dst, const gr_csr_mat_t src, slong c, gr_ctx_t ctx)
+ int gr_csr_mat_mul_scalar_ui(gr_csr_mat_t dst, const gr_csr_mat_t src, ulong c, gr_ctx_t ctx)
+ int gr_csr_mat_mul_scalar_fmpz(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+ int gr_csr_mat_mul_scalar_fmpq(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+ int gr_csr_mat_mul_scalar_2exp_si(gr_csr_mat_t dst, const gr_csr_mat_t src, slong c, gr_ctx_t ctx)
+ int gr_csr_mat_div_scalar(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_csr_mat_div_scalar_si(gr_csr_mat_t dst, const gr_csr_mat_t src, slong c, gr_ctx_t ctx)
+ int gr_csr_mat_div_scalar_ui(gr_csr_mat_t dst, const gr_csr_mat_t src, ulong c, gr_ctx_t ctx)
+ int gr_csr_mat_div_scalar_fmpz(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+ int gr_csr_mat_div_scalar_fmpq(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+ int gr_csr_mat_divexact_scalar(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_csr_mat_divexact_scalar_si(gr_csr_mat_t dst, const gr_csr_mat_t src, slong c, gr_ctx_t ctx)
+ int gr_csr_mat_divexact_scalar_ui(gr_csr_mat_t dst, const gr_csr_mat_t src, ulong c, gr_ctx_t ctx)
+ int gr_csr_mat_divexact_scalar_fmpz(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+ int gr_csr_mat_divexact_scalar_fmpq(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+
+ int gr_lil_mat_mul_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_lil_mat_mul_scalar_si(gr_lil_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx)
+ int gr_lil_mat_mul_scalar_ui(gr_lil_mat_t dst, const gr_lil_mat_t src, ulong c, gr_ctx_t ctx)
+ int gr_lil_mat_mul_scalar_fmpz(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+ int gr_lil_mat_mul_scalar_fmpq(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+ int gr_lil_mat_mul_scalar_2exp_si(gr_lil_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx)
+ int gr_lil_mat_div_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_lil_mat_div_scalar_si(gr_lil_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx)
+ int gr_lil_mat_div_scalar_ui(gr_lil_mat_t dst, const gr_lil_mat_t src, ulong c, gr_ctx_t ctx)
+ int gr_lil_mat_div_scalar_fmpz(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+ int gr_lil_mat_div_scalar_fmpq(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+ int gr_lil_mat_divexact_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_lil_mat_divexact_scalar_si(gr_lil_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx)
+ int gr_lil_mat_divexact_scalar_ui(gr_lil_mat_t dst, const gr_lil_mat_t src, ulong c, gr_ctx_t ctx)
+ int gr_lil_mat_divexact_scalar_fmpz(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+ int gr_lil_mat_divexact_scalar_fmpq(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+
+ int gr_coo_mat_mul_scalar(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_coo_mat_mul_scalar_si(gr_coo_mat_t dst, const gr_coo_mat_t src, slong c, gr_ctx_t ctx)
+ int gr_coo_mat_mul_scalar_ui(gr_coo_mat_t dst, const gr_coo_mat_t src, ulong c, gr_ctx_t ctx)
+ int gr_coo_mat_mul_scalar_fmpz(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+ int gr_coo_mat_mul_scalar_fmpq(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+ int gr_coo_mat_mul_scalar_2exp_si(gr_coo_mat_t dst, const gr_coo_mat_t src, slong c, gr_ctx_t ctx)
+ int gr_coo_mat_div_scalar(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_coo_mat_div_scalar_si(gr_coo_mat_t dst, const gr_coo_mat_t src, slong c, gr_ctx_t ctx)
+ int gr_coo_mat_div_scalar_ui(gr_coo_mat_t dst, const gr_coo_mat_t src, ulong c, gr_ctx_t ctx)
+ int gr_coo_mat_div_scalar_fmpz(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+ int gr_coo_mat_div_scalar_fmpq(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+ int gr_coo_mat_divexact_scalar(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_coo_mat_divexact_scalar_si(gr_coo_mat_t dst, const gr_coo_mat_t src, slong c, gr_ctx_t ctx)
+ int gr_coo_mat_divexact_scalar_ui(gr_coo_mat_t dst, const gr_coo_mat_t src, ulong c, gr_ctx_t ctx)
+ int gr_coo_mat_divexact_scalar_fmpz(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+ int gr_coo_mat_divexact_scalar_fmpq(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+
+ Set *dst* to be *src* multiplied or divided by *c*.
+ (Addition and subtraction are not provided because they would create
+ dense output.)
+
+Arithmetic into dense matrices
+--------------------------------------------------------------------------------
+
+.. function:: int gr_mat_update_lil_mat_nz(gr_mat_t dres, const gr_lil_mat_t src, gr_ctx_t ctx)
+ int gr_mat_add_lil_mat(gr_mat_t dres, const gr_mat_t dmat1, const gr_lil_mat_t smat2, gr_ctx_t ctx)
+ int gr_mat_sub_lil_mat(gr_mat_t dres, const gr_mat_t dmat1, const gr_lil_mat_t smat2, gr_ctx_t ctx)
+ int gr_mat_mul_lil_mat_nz(gr_mat_t dres, const gr_mat_t dmat1, const gr_lil_mat_t smat2, gr_ctx_t ctx)
+ int gr_mat_div_lil_mat_nz(gr_mat_t dres, const gr_mat_t dmat1, const gr_lil_mat_t smat2, gr_ctx_t ctx)
+ int gr_mat_addmul_lil_mat_scalar(gr_mat_t dres, const gr_lil_mat_t smat, gr_srcptr c, gr_ctx_t ctx)
+ int gr_mat_submul_lil_mat_scalar(gr_mat_t dres, const gr_lil_mat_t smat, gr_srcptr c, gr_ctx_t ctx)
+ int gr_mat_addmul_lil_mat_scalar_si(gr_mat_t dres, const gr_lil_mat_t smat, slong c, gr_ctx_t ctx)
+ int gr_mat_submul_lil_mat_scalar_si(gr_mat_t dres, const gr_lil_mat_t smat, slong c, gr_ctx_t ctx)
+ int gr_mat_addmul_lil_mat_scalar_fmpz(gr_mat_t dres, const gr_lil_mat_t smat, const fmpz_t c, gr_ctx_t ctx)
+ int gr_mat_submul_lil_mat_scalar_fmpz(gr_mat_t dres, const gr_lil_mat_t smat, const fmpz_t c, gr_ctx_t ctx)
+
+ These functions facilitate accumulating a sparse matrix into a dense
+ target. They have one dense input, one sparse input, and a dense output
+ (where the dense input and output are the same for the fused operations).
+ For all functions, it is assumed that *dres* and *dmat1* have the same
+ shape as *smat* or *smat2*, as appropriate. All functions only modify
+ the locations in *dres* at which the sparse matrix has a nonzero value:
+ in particular, the functions *gr_mat_mul_lil_mat_nz* and
+ *gr_mat_div_lil_mat_nz* behave very differently from their dense counterparts.
+
+Sum and product
+--------------------------------------------------------------------------------
+
+.. function:: int gr_csr_mat_sum(gr_ptr res, const gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_sum(gr_ptr res, const gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_coo_mat_sum(gr_ptr res, const gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Set *res* to the sum of the entries in *mat*.
+
+.. function:: int gr_csr_mat_nz_product(gr_ptr res, const gr_csr_mat_t mat, gr_ctx_t ctx)
+ int gr_lil_mat_nz_product(gr_ptr res, const gr_lil_mat_t mat, gr_ctx_t ctx)
+ int gr_coo_mat_nz_product(gr_ptr res, const gr_coo_mat_t mat, gr_ctx_t ctx)
+
+ Set *res* to the product of the nonzero entries in *mat*.
+
+
+Matrix multiplication
+--------------------------------------------------------------------------------
+
+.. function:: int gr_csr_mat_mul_vec(gr_vec_t v, const gr_csr_mat_t A, const gr_vec_t u, gr_ctx_t ctx)
+ int gr_lil_mat_mul_vec(gr_vec_t v, const gr_lil_mat_t A, const gr_vec_t u, gr_ctx_t ctx)
+
+ Set *v* equal to `A \cdot u`, i.e., right multiplication by *u*.
+
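+ For intuition, the CSR product reduces to one sparse dot product per row; an
+ illustrative sketch of the core loop (not the library implementation)::
+
+     slong row, k, sz = ctx->sizeof_elem;
+     int status = GR_SUCCESS;
+
+     for (row = 0; row < A->r; row++)
+     {
+         status |= gr_zero(GR_ENTRY(v->entries, row, sz), ctx);
+         for (k = A->rows[row]; k < A->rows[row + 1]; k++)
+             status |= gr_addmul(GR_ENTRY(v->entries, row, sz),
+                                 GR_ENTRY(A->nzs, k, sz),
+                                 GR_ENTRY(u->entries, A->cols[k], sz), ctx);
+     }
+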
+.. function:: int gr_csr_mat_mul_mat_transpose(gr_mat_t Ct, const gr_csr_mat_t A, const gr_mat_t Bt, gr_ctx_t ctx)
+ int gr_lil_mat_mul_mat_transpose(gr_mat_t Ct, const gr_lil_mat_t A, const gr_mat_t Bt, gr_ctx_t ctx)
+
+ Set *Ct* equal to the transpose of `A \cdot B`, where *Bt* is the transpose of *B*;
+ that is, right multiplication with *B* and *C* stored in transposed (column-by-column) form.
+
+.. function:: int gr_csr_mat_mul_mat(gr_mat_t C, const gr_csr_mat_t A, const gr_mat_t B, gr_ctx_t ctx)
+ int gr_lil_mat_mul_mat(gr_mat_t C, const gr_lil_mat_t A, const gr_mat_t B, gr_ctx_t ctx)
+
+ Set *C* equal to `A \cdot B`, i.e., perform right multiplication by *B*.
+
+
+Solving, nullvector, and nullspace computation
+--------------------------------------------------------------------------------
+
+.. function:: int gr_lil_mat_solve_lanczos(gr_ptr x, const gr_lil_mat_t M, gr_srcptr b, flint_rand_t state, gr_ctx_t ctx)
+ int gr_lil_mat_solve_block_lanczos(gr_ptr x, const gr_lil_mat_t M, gr_srcptr b, slong block_size, flint_rand_t state, gr_ctx_t ctx)
+ int gr_lil_mat_solve_wiedemann(gr_ptr x, const gr_lil_mat_t M, gr_srcptr b, gr_ctx_t ctx)
+ int gr_lil_mat_solve_block_wiedemann(gr_ptr x, const gr_lil_mat_t M, gr_srcptr b, slong block_size, flint_rand_t state, gr_ctx_t ctx)
+
+ Solve `Mx = b` for a sparse matrix `M` and column vector `b`, using either the Lanczos or Wiedemann algorithm.
+ These are randomized algorithms, and thus may fail to find an existing solution without proving that none
+ exists (returning ``GR_UNABLE``). Both have block variants, which are better for large matrices and take an
+ extra *block_size* parameter. The (block) Wiedemann algorithm requires the given matrix to be square, but not
+ symmetric; the Lanczos algorithm requires both, so for a general matrix it instead solves `M^TMx = M^Tb`. Thus, it may
+ return a *pseudo-solution*, which solves the latter system but not the former.
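+
+ A usage sketch (*M* and a vector *b* of matching length are assumed already
+ constructed over some context ``ctx``; ``_gr_vec_init`` and ``flint_malloc``
+ are from the core interfaces)::
+
+     gr_ptr x = flint_malloc(M->c * ctx->sizeof_elem);
+     _gr_vec_init(x, M->c, ctx);
+
+     if (gr_lil_mat_solve_wiedemann(x, M, b, ctx) == GR_SUCCESS)
+     {
+         /* x now satisfies M x = b */
+     }
+
+     _gr_vec_clear(x, M->c, ctx);
+     flint_free(x);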
+
+.. function:: int gr_lil_mat_nullvector_lanczos(gr_ptr x, const gr_lil_mat_t M, flint_rand_t state, gr_ctx_t ctx)
+ int gr_lil_mat_nullvector_block_lanczos(gr_ptr x, const gr_lil_mat_t M, slong block_size, flint_rand_t state, gr_ctx_t ctx)
+ int gr_lil_mat_nullvector_wiedemann(gr_ptr x, const gr_lil_mat_t M, flint_rand_t state, gr_ctx_t ctx)
+ int gr_lil_mat_nullvector_block_wiedemann(gr_ptr x, const gr_lil_mat_t M, slong block_size, flint_rand_t state, gr_ctx_t ctx)
+
+ Find a nullvector for the sparse matrix `M`, using (block) Lanczos or Wiedemann.
+
+.. function:: int gr_lil_mat_nullspace(gr_mat_t X, const gr_lil_mat_t M, flint_rand_t state, slong max_iters, const char *algorithm, slong block_size, gr_ctx_t ctx)
+ int gr_lil_mat_nullspace_lanczos(gr_mat_t X, const gr_lil_mat_t M, flint_rand_t state, slong max_iters, gr_ctx_t ctx)
+ int gr_lil_mat_nullspace_wiedemann(gr_mat_t X, const gr_lil_mat_t M, flint_rand_t state, slong max_iters, gr_ctx_t ctx)
+ int gr_lil_mat_nullspace_block_lanczos(gr_mat_t X, const gr_lil_mat_t M, flint_rand_t state, slong max_iters, slong block_size, gr_ctx_t ctx)
+ int gr_lil_mat_nullspace_block_wiedemann(gr_mat_t X, const gr_lil_mat_t M, flint_rand_t state, slong max_iters, slong block_size, gr_ctx_t ctx)
+
+ Find the nullspace for the sparse matrix `M`, using (block) Lanczos or Wiedemann.
+
+.. raw:: latex
+
+ \newpage
diff --git a/doc/source/gr_sparse_vec.rst b/doc/source/gr_sparse_vec.rst
new file mode 100644
index 0000000000..788254b861
--- /dev/null
+++ b/doc/source/gr_sparse_vec.rst
@@ -0,0 +1,321 @@
+.. _gr-sparse-vec:
+
+**gr_sparse_vec.h** -- sparse vectors over generic rings
+===============================================================================
+
+A :type:`gr_sparse_vec_t` is a representation of a vector over a generic
+ring which is optimized for the situation where there are many entries which
+are zero (the vector is *sparse*, or has *low density*). Internally, the
+vector records only the positions of the nonzeros and their values.
+Although technically the type and all its functions will operate correctly
+at any density, a regular :type:`gr_vec_t` may be more efficient if the number
+of nonzeros is not small.
+
+Types and basic access
+--------------------------------------------------------------------------------
+
+.. type:: gr_sparse_vec_struct
+
+.. type:: gr_sparse_vec_t
+
+ This struct contains:
+
+ * an `slong` value ``length``, the nominal length of the vector;
+ * an `slong` value ``nnz``, the number of nonzero elements in the vector;
+ * an `slong` value ``alloc``, the maximum number of nonzero elements currently storable in the vector;
+ * a pointer to an `slong` array of indices (``inds``) of nonzeros;
+ * a :type:`gr_ptr` array of values of nonzeros (``nzs``).
+
+ The methods for sparse vectors maintain the following properties:
+
+ * the indices ``inds`` are unique and sorted into strictly increasing order;
+ * the values ``nzs`` are nonzero (``gr_is_zero(entry, ctx)`` does not return ``T_TRUE``);
+ * ``nnz <= alloc <= length``.
+
+ A ``gr_sparse_vec_t`` is defined as an array of length one of type
+ ``gr_sparse_vec_struct``, permitting a ``gr_sparse_vec_t`` to
+ be passed by reference.
+
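+ As an illustration (not part of the interface), looping over the stored
+ nonzeros of a sparse vector::
+
+     slong i;
+     slong sz = ctx->sizeof_elem;
+
+     for (i = 0; i < vec->nnz; i++)
+     {
+         /* index of this nonzero: vec->inds[i] */
+         /* value of this nonzero: GR_ENTRY(vec->nzs, i, sz) */
+     }
+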
+.. function:: void gr_sparse_vec_init(gr_sparse_vec_t vec, slong len, gr_ctx_t ctx)
+
+ Initializes *vec* to an empty vector of length *len* in the ring *ctx*. The
+ length must be nonnegative. No storage for nonzeroes is allocated.
+
+.. function:: void gr_sparse_vec_clear(gr_sparse_vec_t vec, gr_ctx_t ctx)
+
+ Clears the vector *vec*.
+
+.. function:: slong gr_sparse_vec_length(const gr_sparse_vec_t vec)
+
+ Return the nominal length of the vector (note: not the number of nonzeros).
+
+.. function:: void gr_sparse_vec_set_length(gr_sparse_vec_t vec, slong len, gr_ctx_t ctx)
+
+ Set the nominal length of the vector *vec* to *len*. If *len* is smaller than
+ the current length of *vec*, any entries whose indices are at least *len*
+ are truncated. That is, the number of nonzeros can change.
+
+.. function:: slong gr_sparse_vec_nnz(const gr_sparse_vec_t vec)
+
+ Get the number of nonzeros in *vec*.
+
+.. function:: void gr_sparse_vec_fit_nnz(gr_sparse_vec_t vec, slong nnz, gr_ctx_t ctx)
+
+ Ensure that *vec* has enough storage to hold at least *nnz* nonzeros. This does
+ not change the length of the vector or the number of nonzeros stored.
+
+.. function:: void gr_sparse_vec_shrink_to_nnz(gr_sparse_vec_t vec, gr_ctx_t ctx)
+
+ Reallocate the storage in *vec* down to the current number of nonzeros.
+
+.. function:: int gr_sparse_vec_from_entries(gr_sparse_vec_t vec, ulong * inds, gr_srcptr entries, slong nnz, truth_t is_canonical, gr_ctx_t ctx)
+
+ Construct *vec* from the sparse data given by *inds* and *entries* of length *nnz*.
+ If ``is_canonical`` is `T_TRUE`, the indices are assumed to be sorted and unique, and
+ the entries are assumed to be nonzero. Otherwise, the function will sort and compress
+ the entries to leave the vector in sorted, unique, nonzero form.
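+
+ For example, a sketch building the length-5 integer vector `(0, 5, 0, -3, 0)`
+ from unsorted input (``gr_ctx_init_fmpz`` gives the integer context; small
+ values may be written directly as ``fmpz``)::
+
+     gr_ctx_t ctx;
+     gr_sparse_vec_t vec;
+     ulong inds[2] = { 3, 1 };
+     fmpz entries[2] = { -3, 5 };
+     int status;
+
+     gr_ctx_init_fmpz(ctx);
+     gr_sparse_vec_init(vec, 5, ctx);
+     /* T_FALSE: the input is unsorted, so it is sorted and compressed */
+     status = gr_sparse_vec_from_entries(vec, inds, entries, 2, T_FALSE, ctx);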
+
+.. function:: int gr_sparse_vec_randtest(gr_sparse_vec_t vec, slong nnz, int replacement, flint_rand_t state, gr_ctx_t ctx)
+
+ Initialize *vec* to a random vector with ``nnz`` nonzero entries, sampled
+ either with or without replacement. For low densities (``nnz/length`` below
+ ``1/sqrt(length)``), sampling with replacement is faster and likely to
+ give the right number of nonzeros. For larger values of ``nnz``, this
+ function uses reservoir sampling to get the specified number of nonzeros
+ with uniform sampling. The value at each sampled index
+ is generated using ``gr_randtest_nonzero``.
+
+.. function:: int gr_sparse_vec_randtest_prob(gr_sparse_vec_t vec, double prob, flint_rand_t state, gr_ctx_t ctx)
+
+ Initialize *vec* to a random vector, in which each index is independently
+ given a nonzero value with probability ``prob``. The value at each sampled index
+ is generated using ``gr_randtest_nonzero``.
+
+
+Getting, setting and conversion
+--------------------------------------------------------------------------------
+
+.. macro:: GR_SPARSE_VEC_IND(vec, i)
+
+ Macro to access the index of the *i*-th nonzero.
+ The index must be in bounds (between 0 and ``vec->nnz``).
+
+.. macro:: GR_SPARSE_VEC_ENTRY(vec, i, sz)
+
+ Access the value of the *i*-th nonzero.
+ The index must be in bounds (between 0 and ``vec->nnz``).
+ (See also ``gr_sparse_vec_get_entry``.)
+
+.. function:: ulong * gr_sparse_vec_ind_ptr(gr_sparse_vec_t vec, slong i, gr_ctx_t ctx)
+ const ulong * gr_sparse_vec_ind_srcptr(const gr_sparse_vec_t vec, slong i, gr_ctx_t ctx)
+ gr_ptr gr_sparse_vec_entry_ptr(gr_sparse_vec_t vec, slong i, gr_ctx_t ctx)
+ gr_srcptr gr_sparse_vec_entry_srcptr(const gr_sparse_vec_t vec, slong i, gr_ctx_t ctx)
+
+ Get pointers to the index or value of the *i*-th nonzero. Unlike the above
+ macros, these perform bounds checking, and return NULL if out of bounds.
+
+.. function:: gr_ptr gr_sparse_vec_find_entry(gr_sparse_vec_t vec, slong ind, gr_ctx_t ctx)
+
+ Return a pointer to the entry at index *ind*. If *ind* is not an index
+ at which *vec* contains a nonzero (including if it is out of bounds),
+ the function returns NULL. This is performed via binary search, so
+ is worst-case logarithmic time.
+
+.. function:: int gr_sparse_vec_get_entry(gr_ptr res, gr_sparse_vec_t vec, slong ind, gr_ctx_t ctx)
+
+ Set *res* to be a copy of the entry at index *ind*. If *ind* is not an index
+ at which *vec* contains a nonzero, *res* is set to zero. This is performed via binary search, so
+ is worst-case logarithmic time.
+
+.. function:: int gr_sparse_vec_set_entry(gr_sparse_vec_t vec, slong ind, gr_srcptr entry, gr_ctx_t ctx)
+
+ Set the value at index *ind* to be *entry*. Because of the way sparse
+ vectors are represented, it is not efficient to call this function
+ repeatedly (it is worst-case linear time in the number of nonzeros in *vec*).
+ In such cases, it is preferable to construct a new sparse vector from the entries
+ (using ``gr_sparse_vec_from_entries``) and pass it
+ to ``gr_sparse_vec_update``.
+
+.. function:: int gr_sparse_vec_set(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_ctx_t ctx)
+
+ Set *dst* to a copy of *src*.
+
+.. function:: int gr_sparse_vec_slice(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong ind_start, slong ind_end, gr_ctx_t ctx)
+
+ Set *dst* to a copy of the slice of *src* given by any entries whose
+ indices lie in the half-open interval ``[ind_start, ind_end)``.
+ Indices are shifted down by *ind_start* (an index of ``ind_start``
+ would become ``0``).
+
+.. function:: int gr_sparse_vec_set_vec(gr_sparse_vec_t dst, gr_srcptr src, slong len, gr_ctx_t ctx)
+
+ Set *dst* from the (raw) dense vector *src* of length *len*.
+
+.. function:: int gr_vec_set_sparse_vec(gr_ptr dst, gr_sparse_vec_t src, gr_ctx_t ctx)
+
+ Set the raw vector *dst* from sparse vector *src*. *dst* must have
+ sufficient space (i.e., ``src->length`` entries).
+
+.. function:: void gr_sparse_vec_swap(gr_sparse_vec_t vec1, gr_sparse_vec_t vec2, gr_ctx_t ctx)
+
+ Swap the sparse vectors *vec1* and *vec2*. The entries are swapped as pointers, so
+ no additional allocation or copying is performed.
+
+.. function:: int gr_sparse_vec_zero(gr_sparse_vec_t vec, gr_ctx_t ctx)
+
+ Set *vec* to the zero vector.
+
+.. function:: int gr_sparse_vec_one(gr_sparse_vec_t vec, slong ind, gr_ctx_t ctx)
+
+ Set *vec* to the elementary vector with a one at index *ind*.
+
+.. function:: int gr_sparse_vec_permute_inds(gr_sparse_vec_t vec, const gr_sparse_vec_t src, slong * p, gr_ctx_t ctx)
+
+ Set *vec* to a copy of *src* with the indices permuted, i.e.,
+ ``vec[p[i]] = src[i]``.
+
+Comparison
+--------------------------------------------------------------------------------
+
+.. function:: truth_t gr_sparse_vec_equal(const gr_sparse_vec_t vec1, const gr_sparse_vec_t vec2, gr_ctx_t ctx)
+
+ Returns ``T_TRUE`` if *vec1* and *vec2* represent the same vector and ``T_FALSE`` otherwise.
+
+.. function:: truth_t gr_sparse_vec_is_zero(const gr_sparse_vec_t vec, gr_ctx_t ctx)
+
+ Return ``T_TRUE`` if *vec* represents the zero vector and ``T_FALSE`` otherwise.
+
+
+Output
+--------------------------------------------------------------------------------
+
+.. function:: int gr_sparse_vec_write_nz(gr_stream_t out, const gr_sparse_vec_t vec, gr_ctx_t ctx)
+
+ Write the nonzeros of *vec* to the stream *out*. See ``gr_vec_set_sparse_vec``
+ if it is desired to print out the entire vector, zeros and all.
+
+.. function:: int gr_sparse_vec_print_nz(const gr_sparse_vec_t vec, gr_ctx_t ctx)
+
+ Print the nonzeros of *vec* to ``stdout``. See ``gr_vec_set_sparse_vec``
+ if it is desired to print out the entire vector, zeros and all.
+
+
+Arithmetic
+--------------------------------------------------------------------------------
+
+.. function:: int gr_sparse_vec_neg(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_ctx_t ctx)
+
+ Set *dst* to -*src*.
+
+.. function:: int gr_sparse_vec_update(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_ctx_t ctx)
+
+ Update *dst* with the nonzeros in *src*. That is, any indices in *dst* which also appear
+ in *src* are overwritten with their values in *src*. Any indices in *dst* which do
+ not appear in *src* are left unchanged.
+
+.. function:: int gr_sparse_vec_add(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, slong len, gr_ctx_t ctx)
+ int gr_sparse_vec_sub(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, slong len, gr_ctx_t ctx)
+ int gr_sparse_vec_mul(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, slong len, gr_ctx_t ctx)
+
+ Componentwise operations. (We do not provide analogous division or exponentiation
+ routines, since with sparse inputs the results of these operations would be undefined or
+ fully dense.)
+
+.. function:: int gr_sparse_vec_add_other(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx2, gr_ctx_t ctx)
+ int gr_sparse_vec_sub_other(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx2, gr_ctx_t ctx)
+ int gr_sparse_vec_mul_other(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx2, gr_ctx_t ctx)
+
+ Componentwise operations where the second input is allowed to have a different ring.
+
+.. function:: int gr_other_add_sparse_vec(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, gr_ctx_t ctx1, const gr_sparse_vec_t src2, gr_ctx_t ctx)
+ int gr_other_sub_sparse_vec(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, gr_ctx_t ctx1, const gr_sparse_vec_t src2, gr_ctx_t ctx)
+ int gr_other_mul_sparse_vec(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, gr_ctx_t ctx1, const gr_sparse_vec_t src2, gr_ctx_t ctx)
+
+ Componentwise operations where the first input is allowed to have a different ring.
+
+.. function:: int gr_sparse_vec_addmul_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_sparse_vec_submul_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_sparse_vec_addmul_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx)
+ int gr_sparse_vec_submul_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx)
+
+ Componentwise fused operations: set *dst* to *dst* plus (respectively minus) *src* multiplied by the scalar *c*, with variants for different scalar types.
+
+
+Arithmetic into dense vectors
+--------------------------------------------------------------------------------
+
+.. function:: int gr_vec_update_sparse_vec_nz(gr_ptr dres, const gr_sparse_vec_t src, gr_ctx_t ctx)
+ int gr_vec_add_sparse_vec(gr_ptr dres, gr_srcptr dvec1, const gr_sparse_vec_t svec2, gr_ctx_t ctx)
+ int gr_vec_sub_sparse_vec(gr_ptr dres, gr_srcptr dvec1, const gr_sparse_vec_t svec2, gr_ctx_t ctx)
+ int gr_vec_mul_sparse_vec_nz(gr_ptr dres, gr_srcptr dvec1, const gr_sparse_vec_t svec2, gr_ctx_t ctx)
+ int gr_vec_div_sparse_vec_nz(gr_ptr dres, gr_srcptr dvec1, const gr_sparse_vec_t svec2, gr_ctx_t ctx)
+ int gr_vec_addmul_sparse_vec_scalar(gr_ptr dres, const gr_sparse_vec_t svec, gr_srcptr c, gr_ctx_t ctx)
+ int gr_vec_submul_sparse_vec_scalar(gr_ptr dres, const gr_sparse_vec_t svec, gr_srcptr c, gr_ctx_t ctx)
+ int gr_vec_addmul_sparse_vec_scalar_si(gr_ptr dres, const gr_sparse_vec_t svec, slong c, gr_ctx_t ctx)
+ int gr_vec_submul_sparse_vec_scalar_si(gr_ptr dres, const gr_sparse_vec_t svec, slong c, gr_ctx_t ctx)
+ int gr_vec_addmul_sparse_vec_scalar_fmpz(gr_ptr dres, const gr_sparse_vec_t svec, const fmpz_t c, gr_ctx_t ctx)
+ int gr_vec_submul_sparse_vec_scalar_fmpz(gr_ptr dres, const gr_sparse_vec_t svec, const fmpz_t c, gr_ctx_t ctx)
+
+ These functions facilitate accumulating a sparse vector into a dense
+ target. They have one dense input, one sparse input, and a dense output
+ (where the dense input and output are the same for the fused operations).
+ For all functions, it is assumed that *dres* and *dvec1* have the same
+ length as *svec* or *svec2*, as appropriate. All functions only modify
+ the locations in *dres* at which the sparse vector has a nonzero value:
+ in particular, the functions *gr_vec_mul_sparse_vec_nz* and
+ *gr_vec_div_sparse_vec_nz* behave very differently from their dense counterparts.
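+
+ For example, a typical elimination step `d \leftarrow d - c \cdot s`, with a
+ dense vector *d* and a sparse vector *s*, touches only the ``s->nnz`` stored
+ positions (a sketch)::
+
+     /* d -= c * s, modifying only the nonzero positions of s */
+     status |= gr_vec_submul_sparse_vec_scalar(d, s, c, ctx);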
+
+
+Scalar multiplication and division
+--------------------------------------------------------------------------------
+
+.. function:: int gr_sparse_vec_mul_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_sparse_vec_mul_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx)
+ int gr_sparse_vec_mul_scalar_ui(gr_sparse_vec_t dst, const gr_sparse_vec_t src, ulong c, gr_ctx_t ctx)
+ int gr_sparse_vec_mul_scalar_fmpz(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpz_t c, gr_ctx_t ctx)
+ int gr_sparse_vec_mul_scalar_fmpq(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpq_t c, gr_ctx_t ctx)
+ int gr_sparse_vec_mul_scalar_2exp_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx)
+ int gr_sparse_vec_div_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_sparse_vec_div_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx)
+ int gr_sparse_vec_div_scalar_ui(gr_sparse_vec_t dst, const gr_sparse_vec_t src, ulong c, gr_ctx_t ctx)
+ int gr_sparse_vec_div_scalar_fmpz(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpz_t c, gr_ctx_t ctx)
+ int gr_sparse_vec_div_scalar_fmpq(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpq_t c, gr_ctx_t ctx)
+ int gr_sparse_vec_divexact_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx)
+ int gr_sparse_vec_divexact_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx)
+ int gr_sparse_vec_divexact_scalar_ui(gr_sparse_vec_t dst, const gr_sparse_vec_t src, ulong c, gr_ctx_t ctx)
+ int gr_sparse_vec_divexact_scalar_fmpz(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpz_t c, gr_ctx_t ctx)
+ int gr_sparse_vec_divexact_scalar_fmpq(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpq_t c, gr_ctx_t ctx)
+
+ Set *dst* to be *src* multiplied or divided by *c*.
+ (Addition and subtraction are not provided because they would create
+ dense output.)
+
+Sum and product
+--------------------------------------------------------------------------------
+
+.. function:: int gr_sparse_vec_sum(gr_ptr res, const gr_sparse_vec_t vec, gr_ctx_t ctx)
+
+ Set *res* to the sum of the entries in *vec*.
+
+.. function:: int gr_sparse_vec_nz_product(gr_ptr res, const gr_sparse_vec_t vec, gr_ctx_t ctx)
+
+ Set *res* to the product of the nonzero entries in *vec*.
+
+
+Dot product
+--------------------------------------------------------------------------------
+
+.. function:: int gr_sparse_vec_dot(gr_ptr res, gr_srcptr c, int subtract, const gr_sparse_vec_t x, const gr_sparse_vec_t y, gr_ctx_t ctx)
+
+ Set *res* equal to `c \pm x \cdot y`.
+
+.. function:: int gr_sparse_vec_dot_vec(gr_ptr res, gr_srcptr c, int subtract, const gr_sparse_vec_t x, const gr_vec_t y, gr_ctx_t ctx)
+
+ Set *res* equal to `c \pm x \cdot y`.
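+
+ For two sparse vectors, the dot product amounts to a single merge pass over
+ the two sorted index lists; an illustrative sketch of the core loop, ignoring
+ the initial value *c* and the *subtract* flag::
+
+     slong ix = 0, iy = 0, sz = ctx->sizeof_elem;
+     int status = GR_SUCCESS;
+
+     while (ix < x->nnz && iy < y->nnz)
+     {
+         if (x->inds[ix] < y->inds[iy])
+             ix++;
+         else if (x->inds[ix] > y->inds[iy])
+             iy++;
+         else
+         {
+             /* common index: multiply the values and accumulate into res */
+             status |= gr_addmul(res, GR_ENTRY(x->nzs, ix, sz),
+                                 GR_ENTRY(y->nzs, iy, sz), ctx);
+             ix++, iy++;
+         }
+     }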
+
+
+
+
+.. raw:: latex
+
+ \newpage
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 906090a8d0..633652f9ae 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -57,6 +57,7 @@ Generic rings
gr_generic.rst
gr_special.rst
gr_vec.rst
+ gr_sparse_vec.rst
gr_mat.rst
+ gr_sparse_mat.rst
gr_poly.rst
gr_mpoly.rst
diff --git a/src/fmpz_mod_mat/det.c b/src/fmpz_mod_mat/det.c
index d0b9844247..d015cd56b1 100644
--- a/src/fmpz_mod_mat/det.c
+++ b/src/fmpz_mod_mat/det.c
@@ -39,4 +39,4 @@ void fmpz_mod_mat_det(fmpz_t res, const fmpz_mod_mat_t mat, const fmpz_mod_ctx_t
GR_MUST_SUCCEED(gr_mat_det_berkowitz(res, (const gr_mat_struct *) mat, gr_ctx));
}
}
-}
\ No newline at end of file
+}
diff --git a/src/gr_mat.h b/src/gr_mat.h
index b14430506d..7a476c1fb6 100644
--- a/src/gr_mat.h
+++ b/src/gr_mat.h
@@ -101,6 +101,8 @@ WARN_UNUSED_RESULT int gr_mat_randops(gr_mat_t mat, flint_rand_t state, slong co
WARN_UNUSED_RESULT int gr_mat_randpermdiag(int * parity, gr_mat_t mat, flint_rand_t state, gr_ptr diag, slong n, gr_ctx_t ctx);
WARN_UNUSED_RESULT int gr_mat_randrank(gr_mat_t mat, flint_rand_t state, slong rank, gr_ctx_t ctx);
+#define gr_mat_is_compatible(mat1, mat2, ctx) (((mat1)->r == (mat2)->r && (mat1)->c == (mat2)->c) ? T_TRUE : T_FALSE)
+
GR_MAT_INLINE truth_t
gr_mat_is_empty(const gr_mat_t mat, gr_ctx_t FLINT_UNUSED(ctx))
{
@@ -137,17 +139,50 @@ WARN_UNUSED_RESULT int gr_mat_sub(gr_mat_t res, const gr_mat_t mat1, const gr_ma
/* todo: test, wrap; div; more conversions */
WARN_UNUSED_RESULT int gr_mat_add_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_add_scalar_si(gr_mat_t res, const gr_mat_t mat, slong x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_add_scalar_ui(gr_mat_t res, const gr_mat_t mat, ulong x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_add_scalar_fmpz(gr_mat_t res, const gr_mat_t mat, fmpz_t x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_add_scalar_fmpq(gr_mat_t res, const gr_mat_t mat, fmpq_t x, gr_ctx_t ctx);
+
WARN_UNUSED_RESULT int gr_mat_sub_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_sub_scalar_si(gr_mat_t res, const gr_mat_t mat, slong x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_sub_scalar_ui(gr_mat_t res, const gr_mat_t mat, ulong x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_sub_scalar_fmpz(gr_mat_t res, const gr_mat_t mat, fmpz_t x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_sub_scalar_fmpq(gr_mat_t res, const gr_mat_t mat, fmpq_t x, gr_ctx_t ctx);
+
WARN_UNUSED_RESULT int gr_mat_mul_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_mul_scalar_si(gr_mat_t res, const gr_mat_t mat, slong x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_mul_scalar_ui(gr_mat_t res, const gr_mat_t mat, ulong x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_mul_scalar_fmpz(gr_mat_t res, const gr_mat_t mat, fmpz_t x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_mul_scalar_fmpq(gr_mat_t res, const gr_mat_t mat, fmpq_t x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_mul_scalar_2exp_si(gr_mat_t res, const gr_mat_t mat, slong e, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_mat_div_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_div_scalar_si(gr_mat_t res, const gr_mat_t mat, slong x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_div_scalar_ui(gr_mat_t res, const gr_mat_t mat, ulong x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_div_scalar_fmpz(gr_mat_t res, const gr_mat_t mat, fmpz_t x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_div_scalar_fmpq(gr_mat_t res, const gr_mat_t mat, fmpq_t x, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_mat_divexact_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_divexact_scalar_si(gr_mat_t res, const gr_mat_t mat, slong x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_divexact_scalar_ui(gr_mat_t res, const gr_mat_t mat, ulong x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_divexact_scalar_fmpz(gr_mat_t res, const gr_mat_t mat, fmpz_t x, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_divexact_scalar_fmpq(gr_mat_t res, const gr_mat_t mat, fmpq_t x, gr_ctx_t ctx);
+
WARN_UNUSED_RESULT int gr_mat_addmul_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx);
WARN_UNUSED_RESULT int gr_mat_submul_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx);
-WARN_UNUSED_RESULT int gr_mat_div_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_mat_mul_vec(gr_ptr v, const gr_mat_t A, gr_srcptr u, gr_ctx_t ctx);
WARN_UNUSED_RESULT int gr_mat_mul_classical(gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr_ctx_t ctx);
WARN_UNUSED_RESULT int gr_mat_mul_strassen(gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr_ctx_t ctx);
WARN_UNUSED_RESULT int gr_mat_mul_generic(gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr_ctx_t ctx);
WARN_UNUSED_RESULT int gr_mat_mul(gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_addmul_classical(gr_mat_t D, const gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_addmul_generic(gr_mat_t D, const gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_addmul(gr_mat_t D, const gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr_ctx_t ctx);
+
/* todo */
GR_MAT_INLINE WARN_UNUSED_RESULT int
gr_mat_sqr(gr_mat_t res, const gr_mat_t mat, gr_ctx_t ctx)
@@ -216,6 +251,27 @@ WARN_UNUSED_RESULT int gr_mat_hilbert(gr_mat_t mat, gr_ctx_t ctx);
WARN_UNUSED_RESULT int gr_mat_hadamard(gr_mat_t mat, gr_ctx_t ctx);
/* todo: dft, dct */
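+/* Helper macros for building a shallow (entry-borrowing) transpose AT of A.
+ Both assume `slong i, j`, `ctx` and a `set_shallow` method are in scope;
+ the INIT variant also allocates via TMP_ALLOC, so the caller needs an
+ active TMP_START and must flint_free(AT->rows) when done. Since the
+ entries are borrowed, AT must not be cleared with gr_mat_clear. */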
+#define _GR_MAT_SHALLOW_TRANSPOSE(AT, A, sz) { \
+ for (j = 0; j < A->c; j++) \
+ { \
+ for (i = 0; i < A->r; i++) \
+ { \
+ set_shallow(GR_MAT_ENTRY(AT, j, i, sz), GR_MAT_ENTRY(A, i, j, sz), ctx); \
+ } \
+ } \
+}
+
+#define _GR_MAT_INIT_SHALLOW_TRANSPOSE(AT, A, sz) { \
+ AT->r = A->c; \
+ AT->c = A->r; \
+ AT->rows = flint_malloc(AT->r * sizeof(gr_ptr)); \
+ AT->entries = TMP_ALLOC(sz * A->c * A->r); \
+ for (j = 0; j < A->c; j++) \
+ AT->rows[j] = GR_ENTRY(AT->entries, j * A->r, sz); \
+ _GR_MAT_SHALLOW_TRANSPOSE(AT, A, sz) \
+}
+
WARN_UNUSED_RESULT int gr_mat_transpose(gr_mat_t B, const gr_mat_t A, gr_ctx_t ctx);
WARN_UNUSED_RESULT int gr_mat_nonsingular_solve_tril_classical(gr_mat_t X, const gr_mat_t L, const gr_mat_t B, int unit, gr_ctx_t ctx);
diff --git a/src/gr_mat/add_scalar.c b/src/gr_mat/add_scalar.c
index caef02d18e..17800bdff1 100644
--- a/src/gr_mat/add_scalar.c
+++ b/src/gr_mat/add_scalar.c
@@ -11,35 +11,56 @@
#include "gr_mat.h"
-int
-gr_mat_add_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx)
-{
- slong i, j, r, c;
- slong sz = ctx->sizeof_elem;
- int status = GR_SUCCESS;
-
- r = gr_mat_nrows(res, ctx);
- c = gr_mat_ncols(res, ctx);
-
- if (res == mat)
- {
- for (i = 0; i < FLINT_MIN(r, c); i++)
- status |= gr_add(GR_MAT_ENTRY(res, i, i, sz), GR_MAT_ENTRY(res, i, i, sz), x, ctx);
- }
- else
- {
- for (i = 0; i < r; i++)
- {
- for (j = 0; j < c; j++)
- {
- /* todo: vectorize */
- if (i == j)
- status |= gr_add(GR_MAT_ENTRY(res, i, j, sz), GR_MAT_ENTRY(mat, i, j, sz), x, ctx);
- else
- status |= gr_set(GR_MAT_ENTRY(res, i, j, sz), GR_MAT_ENTRY(mat, i, j, sz), ctx);
- }
- }
- }
-
- return status;
-}
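+/* Statement-body macro shared by gr_mat_add_scalar and its typed variants:
+ adds x to each diagonal entry (i.e. computes mat + x*I), copying the
+ off-diagonal entries when res and mat are distinct. FUNC is the scalar
+ addition routine applied on the diagonal. */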
+#define GR_MAT_ADD_SCALAR(FUNC, res, mat, x, ctx) \
+ slong i, j, r, c, sz = (ctx)->sizeof_elem; \
+ int status = GR_SUCCESS; \
+ r = gr_mat_nrows(res, ctx); \
+ c = gr_mat_ncols(res, ctx); \
+ if (res == mat) \
+ { \
+ for (i = 0; i < FLINT_MIN(r, c); i++) \
+ status |= (FUNC)( \
+ GR_MAT_ENTRY(res, i, i, sz), \
+ GR_MAT_ENTRY(res, i, i, sz), \
+ x, ctx \
+ ); \
+ } \
+ else \
+ { \
+ for (i = 0; i < r; i++) \
+ { \
+ for (j = 0; j < c; j++) \
+ { \
+ /* todo: vectorize */ \
+ if (i == j) \
+ status |= (FUNC)( \
+ GR_MAT_ENTRY(res, i, j, sz), \
+ GR_MAT_ENTRY(mat, i, j, sz), \
+ x, ctx \
+ ); \
+ else \
+ status |= gr_set( \
+ GR_MAT_ENTRY(res, i, j, sz), \
+ GR_MAT_ENTRY(mat, i, j, sz), \
+ ctx \
+ ); \
+ } \
+ } \
+ } \
+ return status;
+
+
+int gr_mat_add_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx)
+{ GR_MAT_ADD_SCALAR(gr_add, res, mat, x, ctx) }
+
+int gr_mat_add_scalar_si(gr_mat_t res, const gr_mat_t mat, slong x, gr_ctx_t ctx)
+{ GR_MAT_ADD_SCALAR(gr_add_si, res, mat, x, ctx) }
+
+int gr_mat_add_scalar_ui(gr_mat_t res, const gr_mat_t mat, ulong x, gr_ctx_t ctx)
+{ GR_MAT_ADD_SCALAR(gr_add_ui, res, mat, x, ctx) }
+
+int gr_mat_add_scalar_fmpz(gr_mat_t res, const gr_mat_t mat, fmpz_t x, gr_ctx_t ctx)
+{ GR_MAT_ADD_SCALAR(gr_add_fmpz, res, mat, x, ctx) }
+
+int gr_mat_add_scalar_fmpq(gr_mat_t res, const gr_mat_t mat, fmpq_t x, gr_ctx_t ctx)
+{ GR_MAT_ADD_SCALAR(gr_add_fmpq, res, mat, x, ctx) }
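+
+/* Usage sketch (illustrative only): compute A + 3*I in place over the
+ integers, checking the status flag.
+
+ gr_ctx_t ctx;
+ gr_mat_t A;
+ gr_ctx_init_fmpz(ctx);
+ gr_mat_init(A, 4, 4, ctx);
+ GR_MUST_SUCCEED(gr_mat_add_scalar_si(A, A, 3, ctx));
+ gr_mat_clear(A, ctx);
+ gr_ctx_clear(ctx);
+*/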
diff --git a/src/gr_mat/addmul.c b/src/gr_mat/addmul.c
new file mode 100644
index 0000000000..7dc4a06182
--- /dev/null
+++ b/src/gr_mat/addmul.c
@@ -0,0 +1,24 @@
+/*
+ Copyright (C) 2022 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_mat.h"
+
+int
+gr_mat_addmul_generic(gr_mat_t D, const gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr_ctx_t ctx)
+{
+ /* Generic fallback: currently just the classical algorithm. */
+ return gr_mat_addmul_classical(D, C, A, B, ctx);
+}
+
+int
+gr_mat_addmul(gr_mat_t D, const gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr_ctx_t ctx)
+{
+ return gr_mat_addmul_generic(D, C, A, B, ctx);
+}
diff --git a/src/gr_mat/addmul_classical.c b/src/gr_mat/addmul_classical.c
new file mode 100644
index 0000000000..dc97d4ec49
--- /dev/null
+++ b/src/gr_mat/addmul_classical.c
@@ -0,0 +1,117 @@
+/*
+ Copyright (C) 2022 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdint.h>
+#include "gr_vec.h"
+#include "gr_mat.h"
+
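+/* Computes D = C + A*B classically, via dot products against a shallow
+ transpose of B; C and D must have equal dimensions compatible with A*B. */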
+int
+gr_mat_addmul_classical(gr_mat_t D, const gr_mat_t C, const gr_mat_t A, const gr_mat_t B, gr_ctx_t ctx)
+{
+ slong ar, ac, br, bc, i, j, sz;
+ int status;
+
+ ar = gr_mat_nrows(A, ctx);
+ ac = gr_mat_ncols(A, ctx);
+ br = gr_mat_nrows(B, ctx);
+ bc = gr_mat_ncols(B, ctx);
+
+ if (gr_mat_is_compatible(C, D, ctx) == T_FALSE || ac != br || ar != gr_mat_nrows(C, ctx) || bc != gr_mat_ncols(C, ctx))
+ return GR_DOMAIN;
+
+ if (br == 0)
+ {
+ /* A*B is empty, so the result is just C. */
+ return (D == C) ? GR_SUCCESS : gr_mat_set(D, C, ctx);
+ }
+
+ status = GR_SUCCESS;
+
+ if (A == D || B == D)
+ {
+ gr_mat_t T;
+ gr_mat_init(T, ar, bc, ctx);
+ status |= gr_mat_addmul_classical(T, C, A, B, ctx);
+ status |= gr_mat_swap_entrywise(T, D, ctx);
+ gr_mat_clear(T, ctx);
+ return status;
+ }
+
+ sz = ctx->sizeof_elem;
+
+ if (br == 1)
+ {
+ for (i = 0; i < ar; i++)
+ {
+ for (j = 0; j < bc; j++)
+ {
+ if (C != D)
+ status |= gr_set(GR_MAT_ENTRY(D, i, j, sz), GR_MAT_ENTRY(C, i, j, sz), ctx);
+ status |= gr_addmul(GR_MAT_ENTRY(D, i, j, sz),
+ GR_MAT_ENTRY(A, i, 0, sz),
+ GR_MAT_ENTRY(B, 0, j, sz), ctx);
+ }
+ }
+ }
+ else
+ {
+ gr_ptr tmp;
+ gr_method_void_unary_op set_shallow = GR_VOID_UNARY_OP(ctx, SET_SHALLOW);
+ TMP_INIT;
+
+ TMP_START;
+ tmp = TMP_ALLOC(sz * br * bc);
+
+ /* Make a shallow transpose so that we can use dot products.
+ Inline common sizes. (Caution: are we sure about the alignment?
+ Some asserts would be nice here.)
+ Todo: we may want inlining in nonsingular_solve etc. as well. */
+ for (i = 0; i < br; i++)
+ {
+ for (j = 0; j < bc; j++)
+ {
+ switch (sz)
+ {
+#if 0
+ case 1:
+ ((int8_t *) GR_ENTRY(tmp, j * br + i, 1))[0] = ((int8_t *) GR_MAT_ENTRY(B, i, j, 1))[0];
+ break;
+ case 2:
+ ((int16_t *) GR_ENTRY(tmp, j * br + i, 2))[0] = ((int16_t *) GR_MAT_ENTRY(B, i, j, 2))[0];
+ break;
+ case 4:
+ ((int32_t *) GR_ENTRY(tmp, j * br + i, 4))[0] = ((int32_t *) GR_MAT_ENTRY(B, i, j, 4))[0];
+ break;
+#if FLINT_BITS == 64
+ case 8:
+ ((int64_t *) GR_ENTRY(tmp, j * br + i, 8))[0] = ((int64_t *) GR_MAT_ENTRY(B, i, j, 8))[0];
+ break;
+#endif
+#endif
+ default:
+ set_shallow(GR_ENTRY(tmp, j * br + i, sz), GR_MAT_ENTRY(B, i, j, sz), ctx);
+ }
+ }
+ }
+
+ for (i = 0; i < ar; i++)
+ {
+ for (j = 0; j < bc; j++)
+ {
+ status |= _gr_vec_dot(GR_MAT_ENTRY(D, i, j, sz), GR_MAT_ENTRY(C, i, j, sz), 0,
+ GR_MAT_ENTRY(A, i, 0, sz), GR_ENTRY(tmp, j * br, sz), br, ctx);
+ }
+ }
+
+ TMP_END;
+ }
+
+ return status;
+}
diff --git a/src/gr_mat/div_scalar.c b/src/gr_mat/div_scalar.c
index 4567e562f2..42d2911428 100644
--- a/src/gr_mat/div_scalar.c
+++ b/src/gr_mat/div_scalar.c
@@ -10,22 +10,36 @@
*/
#include "gr_mat.h"
+#include "gr_vec.h"
-/* todo: use a vector function; preinvert when appropriate */
-int
-gr_mat_div_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx)
-{
- slong i, j, r, c;
- int status = GR_SUCCESS;
- slong sz = ctx->sizeof_elem;
+/* todo: preinvert when appropriate */
- r = gr_mat_nrows(res, ctx);
- c = gr_mat_ncols(res, ctx);
+#define GR_MAT_DIV_SCALAR(SCALAR_TYPE, res, mat, x, ctx) \
+ slong i, r, c; \
+ int status = GR_SUCCESS; \
+ r = gr_mat_nrows(res, ctx); \
+ c = gr_mat_ncols(res, ctx); \
+ if (c != 0) \
+ for (i = 0; i < r; i++) \
+ status |= _gr_vec_div_##SCALAR_TYPE( \
+ res->rows[i], \
+ mat->rows[i], \
+ c, x, ctx \
+ ); \
+ return status;
- if (c != 0)
- for (i = 0; i < r; i++)
- for (j = 0; j < c; j++)
- status |= gr_div(GR_MAT_ENTRY(res, i, j, sz), GR_MAT_ENTRY(mat, i, j, sz), x, ctx);
- return status;
-}
+int gr_mat_div_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx)
+{ GR_MAT_DIV_SCALAR(scalar, res, mat, x, ctx); }
+
+int gr_mat_div_scalar_si(gr_mat_t res, const gr_mat_t mat, slong x, gr_ctx_t ctx)
+{ GR_MAT_DIV_SCALAR(scalar_si, res, mat, x, ctx); }
+
+int gr_mat_div_scalar_ui(gr_mat_t res, const gr_mat_t mat, ulong x, gr_ctx_t ctx)
+{ GR_MAT_DIV_SCALAR(scalar_ui, res, mat, x, ctx); }
+
+int gr_mat_div_scalar_fmpz(gr_mat_t res, const gr_mat_t mat, fmpz_t x, gr_ctx_t ctx)
+{ GR_MAT_DIV_SCALAR(scalar_fmpz, res, mat, x, ctx); }
+
+int gr_mat_div_scalar_fmpq(gr_mat_t res, const gr_mat_t mat, fmpq_t x, gr_ctx_t ctx)
+{ GR_MAT_DIV_SCALAR(scalar_fmpq, res, mat, x, ctx); }
diff --git a/src/gr_mat/divexact_scalar.c b/src/gr_mat/divexact_scalar.c
new file mode 100644
index 0000000000..6526f13647
--- /dev/null
+++ b/src/gr_mat/divexact_scalar.c
@@ -0,0 +1,45 @@
+/*
+ Copyright (C) 2022 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_mat.h"
+#include "gr_vec.h"
+
+/* todo: preinvert when appropriate */
+
+#define GR_MAT_DIVEXACT_SCALAR(SCALAR_TYPE, res, mat, x, ctx) \
+ slong i, r, c; \
+ int status = GR_SUCCESS; \
+ r = gr_mat_nrows(res, ctx); \
+ c = gr_mat_ncols(res, ctx); \
+ if (c != 0) \
+ for (i = 0; i < r; i++) \
+ status |= _gr_vec_divexact_##SCALAR_TYPE( \
+ res->rows[i], \
+ mat->rows[i], \
+ c, x, ctx \
+ ); \
+ return status;
+
+
+int gr_mat_divexact_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx)
+{ GR_MAT_DIVEXACT_SCALAR(scalar, res, mat, x, ctx); }
+
+int gr_mat_divexact_scalar_si(gr_mat_t res, const gr_mat_t mat, slong x, gr_ctx_t ctx)
+{ GR_MAT_DIVEXACT_SCALAR(scalar_si, res, mat, x, ctx); }
+
+int gr_mat_divexact_scalar_ui(gr_mat_t res, const gr_mat_t mat, ulong x, gr_ctx_t ctx)
+{ GR_MAT_DIVEXACT_SCALAR(scalar_ui, res, mat, x, ctx); }
+
+int gr_mat_divexact_scalar_fmpz(gr_mat_t res, const gr_mat_t mat, fmpz_t x, gr_ctx_t ctx)
+{ GR_MAT_DIVEXACT_SCALAR(scalar_fmpz, res, mat, x, ctx); }
+
+int gr_mat_divexact_scalar_fmpq(gr_mat_t res, const gr_mat_t mat, fmpq_t x, gr_ctx_t ctx)
+{ GR_MAT_DIVEXACT_SCALAR(scalar_fmpq, res, mat, x, ctx); }
diff --git a/src/gr_mat/mul_scalar.c b/src/gr_mat/mul_scalar.c
index 74e39ab6a9..422fa7e1a6 100644
--- a/src/gr_mat/mul_scalar.c
+++ b/src/gr_mat/mul_scalar.c
@@ -12,18 +12,36 @@
#include "gr_vec.h"
#include "gr_mat.h"
-int
-gr_mat_mul_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx)
-{
- slong i, r, c;
- int status = GR_SUCCESS;
- r = gr_mat_nrows(res, ctx);
- c = gr_mat_ncols(res, ctx);
+#define GR_MAT_MUL_SCALAR(SCALAR_TYPE, res, mat, x, ctx) \
+ slong i, r, c; \
+ int status = GR_SUCCESS; \
+ r = gr_mat_nrows(res, ctx); \
+ c = gr_mat_ncols(res, ctx); \
+ if (c != 0) \
+ for (i = 0; i < r; i++) \
+ status |= _gr_vec_mul_##SCALAR_TYPE( \
+ res->rows[i], \
+ mat->rows[i], \
+ c, x, ctx \
+ ); \
+ return status;
- if (c != 0)
- for (i = 0; i < r; i++)
- status |= _gr_vec_mul_scalar(res->rows[i], mat->rows[i], c, x, ctx);
- return status;
-}
+int gr_mat_mul_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx)
+{ GR_MAT_MUL_SCALAR(scalar, res, mat, x, ctx); }
+
+int gr_mat_mul_scalar_si(gr_mat_t res, const gr_mat_t mat, slong x, gr_ctx_t ctx)
+{ GR_MAT_MUL_SCALAR(scalar_si, res, mat, x, ctx); }
+
+int gr_mat_mul_scalar_ui(gr_mat_t res, const gr_mat_t mat, ulong x, gr_ctx_t ctx)
+{ GR_MAT_MUL_SCALAR(scalar_ui, res, mat, x, ctx); }
+
+int gr_mat_mul_scalar_fmpz(gr_mat_t res, const gr_mat_t mat, fmpz_t x, gr_ctx_t ctx)
+{ GR_MAT_MUL_SCALAR(scalar_fmpz, res, mat, x, ctx); }
+
+int gr_mat_mul_scalar_fmpq(gr_mat_t res, const gr_mat_t mat, fmpq_t x, gr_ctx_t ctx)
+{ GR_MAT_MUL_SCALAR(scalar_fmpq, res, mat, x, ctx); }
+
+int gr_mat_mul_scalar_2exp_si(gr_mat_t res, const gr_mat_t mat, slong x, gr_ctx_t ctx)
+{ GR_MAT_MUL_SCALAR(scalar_2exp_si, res, mat, x, ctx); }
diff --git a/src/gr_mat/mul_vec.c b/src/gr_mat/mul_vec.c
new file mode 100644
index 0000000000..1ec06394eb
--- /dev/null
+++ b/src/gr_mat/mul_vec.c
@@ -0,0 +1,39 @@
+/*
+ Copyright (C) 2022 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_mat.h"
+#include "gr_vec.h"
+
+int gr_mat_mul_vec(gr_ptr v, const gr_mat_t A, gr_srcptr u, gr_ctx_t ctx)
+{
+ slong r, c, row, sz;
+ gr_ptr w;
+ int status;
+
+ sz = ctx->sizeof_elem;
+ r = gr_mat_nrows(A, ctx);
+ c = gr_mat_ncols(A, ctx);
+
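+ /* If the output vector aliases the input, compute into a temporary
+ and swap the result back into v. */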
+ if (u == v)
+ {
+ GR_TMP_INIT_VEC(w, r, ctx); /* allocates and initializes w */
+ status = gr_mat_mul_vec(w, A, u, ctx);
+ _gr_vec_swap(v, w, r, ctx);
+ GR_TMP_CLEAR_VEC(w, r, ctx);
+ return status;
+ }
+ status = _gr_vec_zero(v, r, ctx);
+ for (row = 0; row < r; ++row)
+ status |= _gr_vec_dot(GR_ENTRY(v, row, sz), GR_ENTRY(v, row, sz), 0, A->rows[row], u, c, ctx);
+ return status;
+}
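+
+/* Usage sketch (illustrative only): apply an n x n matrix A to a vector u
+ of length n, writing the result to v (v == u is allowed).
+
+ gr_ptr u, v;
+ GR_TMP_INIT_VEC(u, n, ctx);
+ GR_TMP_INIT_VEC(v, n, ctx);
+ GR_MUST_SUCCEED(_gr_vec_randtest(u, state, n, ctx));
+ GR_MUST_SUCCEED(gr_mat_mul_vec(v, A, u, ctx));
+ GR_TMP_CLEAR_VEC(v, n, ctx);
+ GR_TMP_CLEAR_VEC(u, n, ctx);
+*/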
diff --git a/src/gr_mat/sub_scalar.c b/src/gr_mat/sub_scalar.c
index 597cc3f249..179a60d356 100644
--- a/src/gr_mat/sub_scalar.c
+++ b/src/gr_mat/sub_scalar.c
@@ -11,35 +11,57 @@
#include "gr_mat.h"
-int
-gr_mat_sub_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx)
-{
- slong i, j, r, c;
- slong sz = ctx->sizeof_elem;
- int status = GR_SUCCESS;
-
- r = gr_mat_nrows(res, ctx);
- c = gr_mat_ncols(res, ctx);
-
- if (res == mat)
- {
- for (i = 0; i < FLINT_MIN(r, c); i++)
- status |= gr_sub(GR_MAT_ENTRY(res, i, i, sz), GR_MAT_ENTRY(res, i, i, sz), x, ctx);
- }
- else
- {
- for (i = 0; i < r; i++)
- {
- for (j = 0; j < c; j++)
- {
- /* todo: vectorize */
- if (i == j)
- status |= gr_sub(GR_MAT_ENTRY(res, i, j, sz), GR_MAT_ENTRY(mat, i, j, sz), x, ctx);
- else
- status |= gr_set(GR_MAT_ENTRY(res, i, j, sz), GR_MAT_ENTRY(mat, i, j, sz), ctx);
- }
- }
- }
-
- return status;
-}
+
+#define GR_MAT_SUB_SCALAR(FUNC, res, mat, x, ctx) \
+ slong i, j, r, c, sz = (ctx)->sizeof_elem; \
+ int status = GR_SUCCESS; \
+ r = gr_mat_nrows(res, ctx); \
+ c = gr_mat_ncols(res, ctx); \
+ if (res == mat) \
+ { \
+ for (i = 0; i < FLINT_MIN(r, c); i++) \
+ status |= (FUNC)( \
+ GR_MAT_ENTRY(res, i, i, sz), \
+ GR_MAT_ENTRY(res, i, i, sz), \
+ x, ctx \
+ ); \
+ } \
+ else \
+ { \
+ for (i = 0; i < r; i++) \
+ { \
+ for (j = 0; j < c; j++) \
+ { \
+ /* todo: vectorize */ \
+ if (i == j) \
+ status |= (FUNC)( \
+ GR_MAT_ENTRY(res, i, j, sz), \
+ GR_MAT_ENTRY(mat, i, j, sz), \
+ x, ctx \
+ ); \
+ else \
+ status |= gr_set( \
+ GR_MAT_ENTRY(res, i, j, sz), \
+ GR_MAT_ENTRY(mat, i, j, sz), \
+ ctx \
+ ); \
+ } \
+ } \
+ } \
+ return status;
+
+
+int gr_mat_sub_scalar(gr_mat_t res, const gr_mat_t mat, gr_srcptr x, gr_ctx_t ctx)
+{ GR_MAT_SUB_SCALAR(gr_sub, res, mat, x, ctx) }
+
+int gr_mat_sub_scalar_si(gr_mat_t res, const gr_mat_t mat, slong x, gr_ctx_t ctx)
+{ GR_MAT_SUB_SCALAR(gr_sub_si, res, mat, x, ctx) }
+
+int gr_mat_sub_scalar_ui(gr_mat_t res, const gr_mat_t mat, ulong x, gr_ctx_t ctx)
+{ GR_MAT_SUB_SCALAR(gr_sub_ui, res, mat, x, ctx) }
+
+int gr_mat_sub_scalar_fmpz(gr_mat_t res, const gr_mat_t mat, fmpz_t x, gr_ctx_t ctx)
+{ GR_MAT_SUB_SCALAR(gr_sub_fmpz, res, mat, x, ctx) }
+
+int gr_mat_sub_scalar_fmpq(gr_mat_t res, const gr_mat_t mat, fmpq_t x, gr_ctx_t ctx)
+{ GR_MAT_SUB_SCALAR(gr_sub_fmpq, res, mat, x, ctx) }
diff --git a/src/gr_sparse_mat.h b/src/gr_sparse_mat.h
new file mode 100644
index 0000000000..5772f91398
--- /dev/null
+++ b/src/gr_sparse_mat.h
@@ -0,0 +1,987 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#ifndef GR_SPARSE_MAT_H
+#define GR_SPARSE_MAT_H
+
+#ifdef GR_SPARSE_MAT_INLINES_C
+#define GR_SPARSE_MAT_INLINE
+#else
+#define GR_SPARSE_MAT_INLINE static inline
+#endif
+
+#include "gr.h"
+#include "gr_mat.h"
+#include "gr_sparse_vec.h"
+#include "gr_poly.h"
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+ * Types and basic access
+**/
+
+typedef struct
+{
+ slong r;
+ slong c;
+ slong nnz;
+ slong alloc;
+ ulong * rows;
+ ulong * cols;
+ gr_ptr nzs;
+}
+gr_csr_mat_struct;
+
+typedef gr_csr_mat_struct gr_csr_mat_t[1];
+
+typedef struct
+{
+ slong r;
+ slong c;
+ slong nnz;
+ gr_sparse_vec_struct * rows;
+}
+gr_lil_mat_struct;
+
+typedef gr_lil_mat_struct gr_lil_mat_t[1];
+
+typedef struct
+{
+ slong r;
+ slong c;
+ slong nnz;
+ slong alloc;
+ ulong * rows;
+ ulong * cols;
+ gr_ptr nzs;
+ truth_t is_canonical;
+}
+gr_coo_mat_struct;
+
+typedef gr_coo_mat_struct gr_coo_mat_t[1];
+
+#define gr_sparse_mat_nrows(mat, ctx) ((mat)->r)
+#define gr_sparse_mat_ncols(mat, ctx) ((mat)->c)
+#define gr_sparse_mat_nnz(mat, ctx) ((mat)->nnz)
+
+#define GR_CSR_MAT_COL(mat,row,nz_idx) ((mat)->cols + (mat)->rows[row] + nz_idx)
+#define GR_CSR_MAT_ENTRY(mat,row,nz_idx,sz) GR_ENTRY((mat)->nzs, (mat)->rows[row] + nz_idx, sz)
+
+#define GR_LIL_MAT_COL(mat,row,nz_idx) ((mat)->rows[row].inds + nz_idx)
+#define GR_LIL_MAT_ENTRY(mat,row,nz_idx,sz) GR_ENTRY((mat)->rows[row].nzs, nz_idx, sz)
+
+#define GR_COO_MAT_ROW(mat,nz_idx) ((mat)->rows + nz_idx)
+#define GR_COO_MAT_COL(mat,nz_idx) ((mat)->cols + nz_idx)
+#define GR_COO_MAT_ENTRY(mat,nz_idx,sz) GR_ENTRY((mat)->nzs, nz_idx, sz)
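+
+/* Worked example (illustrative): the 3 x 3 matrix
+ [ 5 0 0 ]
+ [ 0 0 7 ]
+ [ 0 0 0 ]
+ has nnz = 2 and is stored as
+ CSR: rows = {0, 1, 2, 2}, cols = {0, 2}, nzs = {5, 7};
+ LIL: row 0 has inds = {0}, nzs = {5}; row 1 has inds = {2}, nzs = {7};
+ row 2 is empty;
+ COO: rows = {0, 1}, cols = {0, 2}, nzs = {5, 7}. */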
+
+GR_SPARSE_MAT_INLINE ulong *
+gr_csr_mat_col_ptr(gr_csr_mat_t mat, slong row, slong nz_idx)
+{
+ if (row < 0 || row >= mat->r || nz_idx < 0 || mat->rows[row] + nz_idx >= mat->rows[row+1])
+ return NULL;
+ return GR_CSR_MAT_COL(mat, row, nz_idx);
+}
+
+GR_SPARSE_MAT_INLINE const ulong *
+gr_csr_mat_col_srcptr(const gr_csr_mat_t mat, slong row, slong nz_idx)
+{
+ if (row < 0 || row >= mat->r || nz_idx < 0 || mat->rows[row] + nz_idx >= mat->rows[row+1])
+ return NULL;
+ return GR_CSR_MAT_COL(mat, row, nz_idx);
+}
+
+GR_SPARSE_MAT_INLINE gr_ptr
+gr_csr_mat_entry_ptr(gr_csr_mat_t mat, slong row, slong nz_idx, gr_ctx_t ctx)
+{
+ if (row < 0 || row >= mat->r || nz_idx < 0 || mat->rows[row] + nz_idx >= mat->rows[row+1])
+ return NULL;
+ return GR_CSR_MAT_ENTRY(mat, row, nz_idx, ctx->sizeof_elem);
+}
+
+GR_SPARSE_MAT_INLINE gr_srcptr
+gr_csr_mat_entry_srcptr(const gr_csr_mat_t mat, slong row, slong nz_idx, gr_ctx_t ctx)
+{
+ if (row < 0 || row >= mat->r || nz_idx < 0 || mat->rows[row] + nz_idx >= mat->rows[row+1])
+ return NULL;
+ return GR_CSR_MAT_ENTRY(mat, row, nz_idx, ctx->sizeof_elem);
+}
+
+GR_SPARSE_MAT_INLINE ulong *
+gr_lil_mat_col_ptr(gr_lil_mat_t mat, slong row, slong nz_idx)
+{
+ if (row < 0 || row >= mat->r || nz_idx < 0 || nz_idx >= mat->rows[row].nnz)
+ return NULL;
+ return GR_LIL_MAT_COL(mat, row, nz_idx);
+}
+
+GR_SPARSE_MAT_INLINE const ulong *
+gr_lil_mat_col_srcptr(const gr_lil_mat_t mat, slong row, slong nz_idx)
+{
+ if (row < 0 || row >= mat->r || nz_idx < 0 || nz_idx >= mat->rows[row].nnz)
+ return NULL;
+ return GR_LIL_MAT_COL(mat, row, nz_idx);
+}
+
+GR_SPARSE_MAT_INLINE gr_ptr
+gr_lil_mat_entry_ptr(gr_lil_mat_t mat, slong row, slong nz_idx, gr_ctx_t ctx)
+{
+ if (row < 0 || row >= mat->r || nz_idx < 0 || nz_idx >= mat->rows[row].nnz)
+ return NULL;
+ return GR_LIL_MAT_ENTRY(mat, row, nz_idx, ctx->sizeof_elem);
+}
+
+GR_SPARSE_MAT_INLINE gr_srcptr
+gr_lil_mat_entry_srcptr(const gr_lil_mat_t mat, slong row, slong nz_idx, gr_ctx_t ctx)
+{
+ if (row < 0 || row >= mat->r || nz_idx < 0 || nz_idx >= mat->rows[row].nnz)
+ return NULL;
+ return GR_LIL_MAT_ENTRY(mat, row, nz_idx, ctx->sizeof_elem);
+}
+
+GR_SPARSE_MAT_INLINE ulong *
+gr_coo_mat_row_ptr(gr_coo_mat_t mat, slong nz_idx)
+{
+ if (nz_idx < 0 || nz_idx >= mat->nnz)
+ return NULL;
+ return GR_COO_MAT_ROW(mat, nz_idx);
+}
+
+GR_SPARSE_MAT_INLINE const ulong *
+gr_coo_mat_row_srcptr(const gr_coo_mat_t mat, slong nz_idx)
+{
+ if (nz_idx < 0 || nz_idx >= mat->nnz)
+ return NULL;
+ return GR_COO_MAT_ROW(mat, nz_idx);
+}
+
+GR_SPARSE_MAT_INLINE ulong *
+gr_coo_mat_col_ptr(gr_coo_mat_t mat, slong nz_idx)
+{
+ if (nz_idx < 0 || nz_idx >= mat->nnz)
+ return NULL;
+ return GR_COO_MAT_COL(mat, nz_idx);
+}
+
+GR_SPARSE_MAT_INLINE const ulong *
+gr_coo_mat_col_srcptr(const gr_coo_mat_t mat, slong nz_idx)
+{
+ if (nz_idx < 0 || nz_idx >= mat->nnz)
+ return NULL;
+ return GR_COO_MAT_COL(mat, nz_idx);
+}
+
+GR_SPARSE_MAT_INLINE gr_ptr
+gr_coo_mat_entry_ptr(gr_coo_mat_t mat, slong nz_idx, gr_ctx_t ctx)
+{
+ if (nz_idx < 0 || nz_idx >= mat->nnz)
+ return NULL;
+ return GR_COO_MAT_ENTRY(mat, nz_idx, ctx->sizeof_elem);
+}
+
+GR_SPARSE_MAT_INLINE gr_srcptr
+gr_coo_mat_entry_srcptr(const gr_coo_mat_t mat, slong nz_idx, gr_ctx_t ctx)
+{
+ if (nz_idx < 0 || nz_idx >= mat->nnz)
+ return NULL;
+ return GR_COO_MAT_ENTRY(mat, nz_idx, ctx->sizeof_elem);
+}
+
+/* Generics */
+/*
+typedef int ((*gr_method_mat_unary_op_get_scalar)(gr_ptr, const gr_sparse_mat_t, gr_ctx_ptr));
+typedef int ((*gr_method_mat_unary_op)(gr_sparse_mat_t, const gr_sparse_mat_t, gr_ctx_ptr));
+typedef int ((*gr_method_mat_binary_op)(gr_sparse_mat_t, const gr_sparse_mat_t, const gr_sparse_mat_t, gr_ctx_ptr));
+typedef int ((*gr_method_mat_pivot_op)(slong *, gr_sparse_mat_t, slong, slong, slong, gr_ctx_ptr));
+typedef int ((*gr_method_mat_diagonalization_op)(gr_vec_t, gr_sparse_mat_t, gr_sparse_mat_t, const gr_sparse_mat_t, int, gr_ctx_ptr));
+
+#define GR_SPARSE_MAT_UNARY_OP_GET_SCALAR(ctx, NAME) (((gr_method_mat_unary_op_get_scalar *) ctx->methods)[GR_METHOD_ ## NAME])
+#define GR_SPARSE_MAT_UNARY_OP(ctx, NAME) (((gr_method_mat_unary_op *) ctx->methods)[GR_METHOD_ ## NAME])
+#define GR_SPARSE_MAT_BINARY_OP(ctx, NAME) (((gr_method_mat_binary_op *) ctx->methods)[GR_METHOD_ ## NAME])
+#define GR_SPARSE_MAT_PIVOT_OP(ctx, NAME) (((gr_method_mat_pivot_op *) ctx->methods)[GR_METHOD_ ## NAME])
+#define GR_SPARSE_MAT_DIAGONALIZATION_OP(ctx, NAME) (((gr_method_mat_diagonalization_op *) ctx->methods)[GR_METHOD_ ## NAME])
+*/
+
+GR_SPARSE_MAT_INLINE void
+gr_csr_mat_init(gr_csr_mat_t mat, slong rows, slong cols, gr_ctx_t ctx) {
+ memset(mat, 0, sizeof(gr_csr_mat_t));
+ mat->r = rows;
+ mat->c = cols;
+ mat->rows = flint_calloc(rows + 1, sizeof(ulong));
+}
+
+GR_SPARSE_MAT_INLINE void
+gr_lil_mat_init(gr_lil_mat_t mat, slong rows, slong cols, gr_ctx_t ctx) {
+ slong row;
+
+ memset(mat, 0, sizeof(gr_lil_mat_t));
+ mat->r = rows;
+ mat->c = cols;
+ mat->rows = flint_calloc(rows, sizeof(gr_sparse_vec_struct));
+
+ for(row = 0; row < mat->r; ++row)
+ gr_sparse_vec_init(&mat->rows[row], cols, ctx);
+}
+
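+/* A COO matrix starts with no nonzero storage allocated; reserve space
+ with gr_coo_mat_fit_nnz before writing entries. */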
+GR_SPARSE_MAT_INLINE void
+gr_coo_mat_init(gr_coo_mat_t mat, slong rows, slong cols, gr_ctx_t ctx) {
+ memset(mat, 0, sizeof(gr_coo_mat_t));
+ mat->r = rows;
+ mat->c = cols;
+ mat->is_canonical = T_TRUE;
+}
+
+GR_SPARSE_MAT_INLINE void
+gr_csr_mat_clear(gr_csr_mat_t mat, gr_ctx_t ctx) {
+ if (mat->alloc != 0)
+ {
+ _gr_vec_clear(mat->nzs, mat->alloc, ctx);
+ flint_free(mat->nzs);
+ flint_free(mat->cols);
+ }
+ flint_free(mat->rows);
+ memset(mat, 0, sizeof(gr_csr_mat_t));
+}
+
+GR_SPARSE_MAT_INLINE void
+gr_lil_mat_clear(gr_lil_mat_t mat, gr_ctx_t ctx) {
+ slong row;
+
+ for (row = 0; row < mat->r; ++row)
+ gr_sparse_vec_clear(&mat->rows[row], ctx);
+ flint_free(mat->rows);
+ memset(mat, 0, sizeof(gr_lil_mat_t));
+}
+
+GR_SPARSE_MAT_INLINE void
+gr_coo_mat_clear(gr_coo_mat_t mat, gr_ctx_t ctx) {
+ if (mat->alloc != 0)
+ {
+ _gr_vec_clear(mat->nzs, mat->alloc, ctx);
+ flint_free(mat->nzs);
+ flint_free(mat->rows);
+ flint_free(mat->cols);
+ }
+ memset(mat, 0, sizeof(gr_coo_mat_t));
+}
+
+GR_SPARSE_MAT_INLINE void
+gr_csr_mat_swap(gr_csr_mat_t mat1, gr_csr_mat_t mat2, gr_ctx_t ctx)
+{
+ FLINT_SWAP(gr_csr_mat_struct, *mat1, *mat2);
+}
+
+GR_SPARSE_MAT_INLINE void
+gr_lil_mat_swap(gr_lil_mat_t mat1, gr_lil_mat_t mat2, gr_ctx_t ctx)
+{
+ FLINT_SWAP(gr_lil_mat_struct, *mat1, *mat2);
+}
+
+GR_SPARSE_MAT_INLINE void
+gr_coo_mat_swap(gr_coo_mat_t mat1, gr_coo_mat_t mat2, gr_ctx_t ctx)
+{
+ FLINT_SWAP(gr_coo_mat_struct, *mat1, *mat2);
+}
+
+void gr_csr_mat_fit_nnz(gr_csr_mat_t mat, slong nnz, gr_ctx_t ctx);
+void gr_lil_mat_fit_nnz(gr_lil_mat_t mat, slong *nnz, gr_ctx_t ctx);
+void gr_coo_mat_fit_nnz(gr_coo_mat_t mat, slong nnz, gr_ctx_t ctx);
+
+
+void gr_csr_mat_shrink_to_nnz(gr_csr_mat_t mat, gr_ctx_t ctx);
+void gr_lil_mat_shrink_to_nnz(gr_lil_mat_t mat, gr_ctx_t ctx);
+void gr_coo_mat_shrink_to_nnz(gr_coo_mat_t mat, gr_ctx_t ctx);
+
+
+void gr_csr_mat_set_cols(gr_csr_mat_t mat, slong cols, gr_ctx_t ctx);
+void gr_lil_mat_set_cols(gr_lil_mat_t mat, slong cols, gr_ctx_t ctx);
+void gr_coo_mat_set_cols(gr_coo_mat_t mat, slong cols, gr_ctx_t ctx);
+
+int gr_coo_mat_from_entries(gr_coo_mat_t mat, ulong *rows, ulong *cols, gr_srcptr entries, slong nnz, truth_t is_canonical, gr_ctx_t ctx);
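+
+/* Usage sketch (illustrative only): build a 2 x 2 identity matrix in COO
+ form from parallel index/value arrays; the values form a gr vector of
+ length nnz in the same context, and sorted duplicate-free input may be
+ flagged canonical.
+
+ ulong rows[2] = {0, 1}, cols[2] = {0, 1};
+ gr_ptr vals;
+ gr_coo_mat_t A;
+ gr_coo_mat_init(A, 2, 2, ctx);
+ GR_TMP_INIT_VEC(vals, 2, ctx);
+ GR_MUST_SUCCEED(gr_one(GR_ENTRY(vals, 0, ctx->sizeof_elem), ctx));
+ GR_MUST_SUCCEED(gr_one(GR_ENTRY(vals, 1, ctx->sizeof_elem), ctx));
+ GR_MUST_SUCCEED(gr_coo_mat_from_entries(A, rows, cols, vals, 2, T_TRUE, ctx));
+ GR_TMP_CLEAR_VEC(vals, 2, ctx);
+*/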
+
+truth_t gr_coo_mat_is_canonical(gr_coo_mat_t mat, gr_ctx_t ctx);
+
+int gr_coo_mat_canonicalize(gr_coo_mat_t mat, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_coo_mat_randtest(gr_coo_mat_t mat, slong nnz, int replacement, truth_t is_canonical, flint_rand_t state, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_coo_mat_randtest_prob(gr_coo_mat_t mat, double prob, flint_rand_t state, gr_ctx_t ctx);
+
+/**
+ * Getting, setting, and conversion
+**/
+
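+/* Note: the borrow functions below fill res with shallow views into mat's
+ storage; the borrowed vector must not be resized or cleared, and it is
+ only valid as long as mat itself is unmodified. */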
+GR_SPARSE_MAT_INLINE void
+_gr_csr_mat_borrow_row(gr_sparse_vec_t res, const gr_csr_mat_t mat, slong r, gr_ctx_t ctx) {
+ ulong offset;
+
+ offset = mat->rows[r];
+ res->length = mat->c;
+ res->nnz = mat->rows[r+1] - offset;
+ res->alloc = res->nnz;
+ res->inds = mat->cols + offset;
+ res->nzs = GR_ENTRY(mat->nzs, offset, ctx->sizeof_elem);
+}
+
+GR_SPARSE_MAT_INLINE void
+_gr_coo_mat_borrow_row(gr_sparse_vec_t res, gr_coo_mat_t mat, slong r, gr_ctx_t ctx) {
+ ulong offset, nnz, lo, mid, hi;
+
+ if (mat->is_canonical == T_FALSE)
+ gr_coo_mat_canonicalize(mat, ctx);
+
+ /* Binary search for the first entry of row r (entries are sorted by
+ row in canonical form), then count the nonzeros in that row. */
+ lo = 0;
+ hi = mat->nnz;
+ while (lo < hi)
+ {
+ mid = lo + (hi - lo) / 2;
+ if (mat->rows[mid] < (ulong) r)
+ lo = mid + 1;
+ else
+ hi = mid;
+ }
+ offset = lo;
+ nnz = 0;
+ while (offset + nnz < (ulong) mat->nnz && mat->rows[offset + nnz] == (ulong) r)
+ ++nnz;
+
+ res->length = mat->c;
+ res->nnz = nnz;
+ res->alloc = nnz;
+ res->inds = mat->cols + offset;
+ res->nzs = GR_ENTRY(mat->nzs, offset, ctx->sizeof_elem);
+}
+
+gr_ptr gr_csr_mat_find_entry(gr_csr_mat_t mat, slong row, slong col, gr_ctx_t ctx);
+gr_ptr gr_lil_mat_find_entry(gr_lil_mat_t mat, slong row, slong col, gr_ctx_t ctx);
+gr_ptr gr_coo_mat_find_entry(gr_coo_mat_t mat, slong row, slong col, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_csr_mat_get_entry(gr_ptr res, gr_csr_mat_t mat, slong row, slong col, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_get_entry(gr_ptr res, gr_lil_mat_t mat, slong row, slong col, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_get_entry(gr_ptr res, gr_coo_mat_t mat, slong row, slong col, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_csr_mat_set_entry(gr_csr_mat_t mat, slong row, slong col, gr_srcptr entry, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_set_entry(gr_lil_mat_t mat, slong row, slong col, gr_srcptr entry, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_set_entry(gr_coo_mat_t mat, slong row, slong col, gr_srcptr entry, gr_ctx_t ctx);
+
+
+GR_SPARSE_MAT_INLINE void
+gr_csr_mat_zero(gr_csr_mat_t mat, gr_ctx_t ctx) {
+ mat->nnz = 0;
+ memset(mat->rows, 0, (mat->r + 1) * sizeof(ulong));
+}
+
+GR_SPARSE_MAT_INLINE void
+gr_lil_mat_zero(gr_lil_mat_t mat, gr_ctx_t ctx) {
+ slong row;
+
+ mat->nnz = 0;
+ for(row = 0; row < mat->r; ++row)
+ {
+ gr_sparse_vec_zero(&mat->rows[row], ctx);
+ }
+}
+
+GR_SPARSE_MAT_INLINE void
+gr_coo_mat_zero(gr_coo_mat_t mat, gr_ctx_t ctx) {
+ mat->nnz = 0;
+}
+
+WARN_UNUSED_RESULT int
+gr_csr_mat_set(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_lil_mat_set(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_coo_mat_set(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_csr_mat_set_lil_mat(gr_csr_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_lil_mat_set_csr_mat(gr_lil_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_coo_mat_set_csr_mat(gr_coo_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_coo_mat_set_lil_mat(gr_coo_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_lil_mat_set_coo_mat(gr_lil_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_csr_mat_set_coo_mat(gr_csr_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_csr_mat_set_mat(gr_csr_mat_t dst, const gr_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_lil_mat_set_mat(gr_lil_mat_t dst, const gr_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_coo_mat_set_mat(gr_coo_mat_t dst, const gr_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_mat_set_csr_mat(gr_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_mat_set_lil_mat(gr_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_mat_set_coo_mat(gr_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx);
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_csr_mat_init_set(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx) {
+ gr_csr_mat_init(dst, src->r, src->c, ctx);
+ return gr_csr_mat_set(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_lil_mat_init_set(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx) {
+ gr_lil_mat_init(dst, src->r, src->c, ctx);
+ return gr_lil_mat_set(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_coo_mat_init_set(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx) {
+ gr_coo_mat_init(dst, src->r, src->c, ctx);
+ return gr_coo_mat_set(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_csr_mat_init_set_lil_mat(gr_csr_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx)
+{
+ gr_csr_mat_init(dst, src->r, src->c, ctx);
+ return gr_csr_mat_set_lil_mat(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_lil_mat_init_set_csr_mat(gr_lil_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx)
+{
+ gr_lil_mat_init(dst, src->r, src->c, ctx);
+ return gr_lil_mat_set_csr_mat(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_csr_mat_init_set_coo_mat(gr_csr_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx)
+{
+ gr_csr_mat_init(dst, src->r, src->c, ctx);
+ return gr_csr_mat_set_coo_mat(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_lil_mat_init_set_coo_mat(gr_lil_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx)
+{
+ gr_lil_mat_init(dst, src->r, src->c, ctx);
+ return gr_lil_mat_set_coo_mat(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_coo_mat_init_set_csr_mat(gr_coo_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx)
+{
+ gr_coo_mat_init(dst, src->r, src->c, ctx);
+ return gr_coo_mat_set_csr_mat(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_coo_mat_init_set_lil_mat(gr_coo_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx)
+{
+ gr_coo_mat_init(dst, src->r, src->c, ctx);
+ return gr_coo_mat_set_lil_mat(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_csr_mat_init_set_mat(gr_csr_mat_t dst, const gr_mat_t src, gr_ctx_t ctx)
+{
+ gr_csr_mat_init(dst, src->r, src->c, ctx);
+ return gr_csr_mat_set_mat(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_lil_mat_init_set_mat(gr_lil_mat_t dst, const gr_mat_t src, gr_ctx_t ctx)
+{
+ gr_lil_mat_init(dst, src->r, src->c, ctx);
+ return gr_lil_mat_set_mat(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_coo_mat_init_set_mat(gr_coo_mat_t dst, const gr_mat_t src, gr_ctx_t ctx)
+{
+ gr_coo_mat_init(dst, src->r, src->c, ctx);
+ return gr_coo_mat_set_mat(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_mat_init_set_csr_mat(gr_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx)
+{
+ gr_mat_init(dst, src->r, src->c, ctx);
+ return gr_mat_set_csr_mat(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_mat_init_set_lil_mat(gr_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx)
+{
+ gr_mat_init(dst, src->r, src->c, ctx);
+ return gr_mat_set_lil_mat(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_mat_init_set_coo_mat(gr_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx)
+{
+ gr_mat_init(dst, src->r, src->c, ctx);
+ return gr_mat_set_coo_mat(dst, src, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int
+gr_lil_mat_update(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx)
+{
+ slong row;
+ int status = GR_SUCCESS;
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ return GR_DOMAIN;
+
+ for (row = 0; row < dst->r; ++row)
+ status |= gr_sparse_vec_update(&dst->rows[row], &src->rows[row], ctx);
+ return status;
+}
+
+WARN_UNUSED_RESULT int gr_csr_mat_permute_cols(gr_csr_mat_t mat, slong * perm, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_permute_cols(gr_lil_mat_t mat, slong * perm, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_permute_cols(gr_coo_mat_t mat, slong * perm, gr_ctx_t ctx);
+
+void gr_lil_mat_window_init(gr_lil_mat_t window, const gr_lil_mat_t mat, slong r1, slong c1, slong r2, slong c2, gr_ctx_t ctx);
+
+GR_SPARSE_MAT_INLINE void
+gr_lil_mat_window_clear(gr_lil_mat_t window, gr_ctx_t ctx)
+{
+ flint_free(window->rows);
+}
+
+WARN_UNUSED_RESULT int gr_lil_mat_swap_rows(gr_lil_mat_t mat, slong * perm, slong r, slong s, gr_ctx_t ctx);
+//WARN_UNUSED_RESULT int gr_coo_mat_swap_rows(gr_coo_mat_t mat, slong * perm, slong r, slong s, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_lil_mat_permute_rows(gr_lil_mat_t mat, const slong * perm, gr_ctx_t ctx);
+//WARN_UNUSED_RESULT int gr_coo_mat_permute_rows(gr_coo_mat_t mat, const slong * perm, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_csr_mat_invert_rows(gr_csr_mat_t mat, slong * perm, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_invert_rows(gr_lil_mat_t mat, slong * perm, gr_ctx_t ctx);
+//WARN_UNUSED_RESULT int gr_coo_mat_invert_rows(gr_coo_mat_t mat, slong * perm, gr_ctx_t ctx);
+
+/*
+WARN_UNUSED_RESULT int gr_lil_mat_concat_horizontal(gr_lil_mat_t res, const gr_lil_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_concat_vertical(gr_csr_mat_t res, const gr_csr_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_concat_vertical(gr_lil_mat_t res, const gr_lil_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx);
+*/
+
+/*
+WARN_UNUSED_RESULT int gr_sparse_mat_randops(gr_csr_mat_t mat, flint_rand_t state, slong count, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_randpermdiag(int * parity, gr_csr_mat_t mat, flint_rand_t state, gr_ptr diag, slong n, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_randrank(gr_csr_mat_t mat, flint_rand_t state, slong rank, gr_ctx_t ctx);
+*/
+
+/**
+ * Comparison
+**/
+
+GR_SPARSE_MAT_INLINE truth_t
+gr_csr_mat_is_zero(const gr_csr_mat_t mat, gr_ctx_t ctx)
+{ return _gr_vec_is_zero(mat->nzs, mat->nnz, ctx); }
+
+GR_SPARSE_MAT_INLINE truth_t
+gr_lil_mat_is_zero(const gr_lil_mat_t mat, gr_ctx_t ctx)
+{
+ slong row;
+ truth_t row_is_zero;
+ truth_t ret = T_TRUE;
+
+ for (row = 0; row < mat->r; ++row)
+ {
+ row_is_zero = gr_sparse_vec_is_zero(&mat->rows[row], ctx);
+ if (row_is_zero == T_FALSE)
+ return T_FALSE;
+ else if (row_is_zero == T_UNKNOWN)
+ ret = T_UNKNOWN;
+ }
+ return ret;
+}
+
+GR_SPARSE_MAT_INLINE truth_t
+gr_coo_mat_is_zero(gr_coo_mat_t mat, gr_ctx_t ctx)
+{
+ if (mat->is_canonical == T_FALSE)
+ gr_coo_mat_canonicalize(mat, ctx);
+ return _gr_vec_is_zero(mat->nzs, mat->nnz, ctx);
+}
+
+truth_t gr_csr_mat_is_one(const gr_csr_mat_t mat, gr_ctx_t ctx);
+truth_t gr_lil_mat_is_one(const gr_lil_mat_t mat, gr_ctx_t ctx);
+truth_t gr_coo_mat_is_one(const gr_coo_mat_t mat, gr_ctx_t ctx);
+
+truth_t gr_csr_mat_is_neg_one(const gr_csr_mat_t mat, gr_ctx_t ctx);
+truth_t gr_lil_mat_is_neg_one(const gr_lil_mat_t mat, gr_ctx_t ctx);
+truth_t gr_coo_mat_is_neg_one(const gr_coo_mat_t mat, gr_ctx_t ctx);
+
+truth_t gr_csr_mat_equal(const gr_csr_mat_t mat1, const gr_csr_mat_t mat2, gr_ctx_t ctx);
+truth_t gr_lil_mat_equal(const gr_lil_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx);
+truth_t gr_coo_mat_equal(const gr_coo_mat_t mat1, const gr_coo_mat_t mat2, gr_ctx_t ctx);
+truth_t gr_csr_mat_equal_lil_mat(const gr_csr_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx);
+
+/*
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int gr_csr_mat_one(gr_csr_mat_t res, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_set_scalar(gr_csr_mat_t res, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_set_ui(gr_csr_mat_t res, ulong v, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_set_si(gr_csr_mat_t res, slong v, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_set_fmpz(gr_csr_mat_t res, const fmpz_t v, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_set_fmpq(gr_csr_mat_t res, const fmpq_t v, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_csr_mat_set_fmpz_csr_mat(gr_csr_mat_t res, const fmpz_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_set_fmpq_csr_mat(gr_csr_mat_t res, const fmpq_csr_mat_t mat, gr_ctx_t ctx);
+*/
+
+/**
+ * Output
+**/
+
+WARN_UNUSED_RESULT int gr_csr_mat_write_nz(gr_stream_t out, const gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_write_nz(gr_stream_t out, const gr_lil_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_write_nz(gr_stream_t out, const gr_coo_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_print_nz(const gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_print_nz(const gr_lil_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_print_nz(const gr_coo_mat_t mat, gr_ctx_t ctx);
+
+/**
+ * Arithmetic
+**/
+
+WARN_UNUSED_RESULT int gr_csr_mat_neg(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_neg(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_neg(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_lil_mat_add(gr_lil_mat_t dst, const gr_lil_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_sub(gr_lil_mat_t dst, const gr_lil_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_mul(gr_lil_mat_t dst, const gr_lil_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_lil_mat_addmul_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_submul_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+
+/**
+ * Component-wise multiplication and division
+**/
+
+WARN_UNUSED_RESULT int gr_csr_mat_mul_scalar(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_mul_scalar_si(gr_csr_mat_t dst, const gr_csr_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_mul_scalar_ui(gr_csr_mat_t dst, const gr_csr_mat_t src, ulong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_mul_scalar_fmpz(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_mul_scalar_fmpq(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpq_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_mul_scalar_2exp_si(gr_csr_mat_t dst, const gr_csr_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_div_scalar(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_div_scalar_si(gr_csr_mat_t dst, const gr_csr_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_div_scalar_ui(gr_csr_mat_t dst, const gr_csr_mat_t src, ulong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_div_scalar_fmpz(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_div_scalar_fmpq(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpq_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_divexact_scalar(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_divexact_scalar_si(gr_csr_mat_t dst, const gr_csr_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_divexact_scalar_ui(gr_csr_mat_t dst, const gr_csr_mat_t src, ulong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_divexact_scalar_fmpz(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_csr_mat_divexact_scalar_fmpq(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpq_t c, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_lil_mat_mul_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_mul_scalar_si(gr_lil_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_mul_scalar_ui(gr_lil_mat_t dst, const gr_lil_mat_t src, ulong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_mul_scalar_fmpz(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_mul_scalar_fmpq(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpq_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_mul_scalar_2exp_si(gr_lil_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_div_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_div_scalar_si(gr_lil_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_div_scalar_ui(gr_lil_mat_t dst, const gr_lil_mat_t src, ulong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_div_scalar_fmpz(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_div_scalar_fmpq(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpq_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_divexact_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_divexact_scalar_si(gr_lil_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_divexact_scalar_ui(gr_lil_mat_t dst, const gr_lil_mat_t src, ulong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_divexact_scalar_fmpz(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_divexact_scalar_fmpq(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpq_t c, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_coo_mat_mul_scalar(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_mul_scalar_si(gr_coo_mat_t dst, const gr_coo_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_mul_scalar_ui(gr_coo_mat_t dst, const gr_coo_mat_t src, ulong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_mul_scalar_fmpz(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_mul_scalar_fmpq(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpq_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_mul_scalar_2exp_si(gr_coo_mat_t dst, const gr_coo_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_div_scalar(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_div_scalar_si(gr_coo_mat_t dst, const gr_coo_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_div_scalar_ui(gr_coo_mat_t dst, const gr_coo_mat_t src, ulong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_div_scalar_fmpz(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_div_scalar_fmpq(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpq_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_divexact_scalar(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_divexact_scalar_si(gr_coo_mat_t dst, const gr_coo_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_divexact_scalar_ui(gr_coo_mat_t dst, const gr_coo_mat_t src, ulong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_divexact_scalar_fmpz(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_coo_mat_divexact_scalar_fmpq(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpq_t c, gr_ctx_t ctx);
+
+/**
+ * Arithmetic into dense matrices
+**/
+
+WARN_UNUSED_RESULT int gr_mat_update_lil_mat_nz(gr_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_add_lil_mat(gr_mat_t dst, const gr_mat_t src1, const gr_lil_mat_t src2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_sub_lil_mat(gr_mat_t dst, const gr_mat_t src1, const gr_lil_mat_t src2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_mul_lil_mat_nz(gr_mat_t dst, const gr_mat_t src1, const gr_lil_mat_t src2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_div_lil_mat_nz(gr_mat_t dst, const gr_mat_t src1, const gr_lil_mat_t src2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_addmul_lil_mat_scalar(gr_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_submul_lil_mat_scalar(gr_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_addmul_lil_mat_scalar_si(gr_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_submul_lil_mat_scalar_si(gr_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_addmul_lil_mat_scalar_fmpz(gr_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_mat_submul_lil_mat_scalar_fmpz(gr_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx);
+
+/**
+ * Sum and product
+**/
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int gr_csr_mat_sum(gr_ptr res, const gr_csr_mat_t mat, gr_ctx_t ctx)
+{ return _gr_vec_sum(res, mat->nzs, mat->nnz, ctx); }
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int gr_lil_mat_sum(gr_ptr res, const gr_lil_mat_t mat, gr_ctx_t ctx)
+{
+ slong row;
+ int status = GR_SUCCESS;
+ gr_ptr elem;
+
+ elem = flint_malloc(ctx->sizeof_elem);
+ gr_init(elem, ctx);
+ status |= gr_zero(res, ctx);
+ for (row = 0; row < mat->r; ++row)
+ {
+ status |= gr_sparse_vec_sum(elem, &mat->rows[row], ctx);
+ status |= gr_add(res, res, elem, ctx);
+ }
+ gr_clear(elem, ctx);
+ flint_free(elem);
+ return status;
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int gr_coo_mat_sum(gr_ptr res, const gr_coo_mat_t mat, gr_ctx_t ctx)
+{ return _gr_vec_sum(res, mat->nzs, mat->nnz, ctx); }
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int gr_csr_mat_nz_product(gr_ptr res, const gr_csr_mat_t mat, gr_ctx_t ctx)
+{ return _gr_vec_product(res, mat->nzs, mat->nnz, ctx); }
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int gr_lil_mat_nz_product(gr_ptr res, const gr_lil_mat_t mat, gr_ctx_t ctx)
+{
+ slong row;
+ int status = GR_SUCCESS;
+ gr_ptr elem;
+
+ elem = flint_malloc(ctx->sizeof_elem);
+ gr_init(elem, ctx);
+ status |= gr_one(res, ctx);
+ for (row = 0; row < mat->r; ++row)
+ {
+ status |= gr_sparse_vec_nz_product(elem, &mat->rows[row], ctx);
+ status |= gr_mul(res, res, elem, ctx);
+ }
+ gr_clear(elem, ctx);
+ flint_free(elem);
+ return status;
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int gr_coo_mat_nz_product(gr_ptr res, const gr_coo_mat_t mat, gr_ctx_t ctx)
+{ return _gr_vec_product(res, mat->nzs, mat->nnz, ctx); }
+
+/**
+ * Transpose
+**/
+
+WARN_UNUSED_RESULT int gr_lil_mat_transpose(gr_lil_mat_t B, const gr_lil_mat_t A, gr_ctx_t ctx);
+
+/**
+ * Matrix multiplication
+**/
+
+WARN_UNUSED_RESULT int gr_csr_mat_mul_vec(gr_ptr v, const gr_csr_mat_t A, gr_srcptr u, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_mul_vec(gr_ptr v, const gr_lil_mat_t A, gr_srcptr u, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_csr_mat_mul_mat_transpose(gr_mat_t Ct, const gr_csr_mat_t A, const gr_mat_t Bt, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_mul_mat_transpose(gr_mat_t Ct, const gr_lil_mat_t A, const gr_mat_t Bt, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_csr_mat_mul_mat(gr_mat_t C, const gr_csr_mat_t A, const gr_mat_t B, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_mul_mat(gr_mat_t C, const gr_lil_mat_t A, const gr_mat_t B, gr_ctx_t ctx);
+
+/**
+ * LU and reduced row echelon form
+**/
+
+WARN_UNUSED_RESULT int gr_lil_mat_lu(slong *rank, slong *P, slong *Q, gr_lil_mat_t L, gr_lil_mat_t U, const gr_lil_mat_t M, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_lil_mat_rref(slong *res_rank, gr_lil_mat_t R, gr_lil_mat_t A, gr_ctx_t ctx);
+
+/**
+ * Solving, nullvector, and nullspace computation
+**/
+
+WARN_UNUSED_RESULT int gr_lil_mat_solve_lanczos(gr_ptr x, const gr_lil_mat_t M, gr_srcptr b, flint_rand_t state, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_solve_block_lanczos(gr_ptr x, const gr_lil_mat_t M, gr_srcptr b, slong block_size, flint_rand_t state, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_solve_wiedemann(gr_ptr x, const gr_lil_mat_t M, gr_srcptr b, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_solve_block_wiedemann(gr_ptr x, const gr_lil_mat_t M, gr_srcptr b, slong block_size, flint_rand_t state, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_lil_mat_nullvector_lanczos(gr_ptr x, const gr_lil_mat_t M, flint_rand_t state, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_nullvector_block_lanczos(gr_ptr x, const gr_lil_mat_t M, slong block_size, flint_rand_t state, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_nullvector_wiedemann(gr_ptr x, const gr_lil_mat_t M, flint_rand_t state, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_lil_mat_nullvector_block_wiedemann(gr_ptr x, const gr_lil_mat_t M, slong block_size, flint_rand_t state, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_lil_mat_nullspace(gr_mat_t X, const gr_lil_mat_t M, flint_rand_t state, slong max_iters, const char *algorithm, slong block_size, gr_ctx_t ctx);
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int gr_lil_mat_nullspace_lanczos(gr_mat_t X, const gr_lil_mat_t M, flint_rand_t state, slong max_iters, gr_ctx_t ctx)
+{
+ return gr_lil_mat_nullspace(X, M, state, max_iters, "lanczos", 1, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int gr_lil_mat_nullspace_wiedemann(gr_mat_t X, const gr_lil_mat_t M, flint_rand_t state, slong max_iters, gr_ctx_t ctx)
+{
+ return gr_lil_mat_nullspace(X, M, state, max_iters, "wiedemann", 1, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int gr_lil_mat_nullspace_block_lanczos(gr_mat_t X, const gr_lil_mat_t M, flint_rand_t state, slong max_iters, slong block_size, gr_ctx_t ctx)
+{
+ return gr_lil_mat_nullspace(X, M, state, max_iters, "block lanczos", block_size, ctx);
+}
+
+GR_SPARSE_MAT_INLINE WARN_UNUSED_RESULT int gr_lil_mat_nullspace_block_wiedemann(gr_mat_t X, const gr_lil_mat_t M, flint_rand_t state, slong max_iters, slong block_size, gr_ctx_t ctx)
+{
+ return gr_lil_mat_nullspace(X, M, state, max_iters, "block wiedemann", block_size, ctx);
+}
+
+
+/*
+WARN_UNUSED_RESULT int gr_sparse_mat_lu(slong * rank, slong * P, gr_csr_mat_t LU, const gr_csr_mat_t A, int rank_check, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_fflu(slong * res_rank, slong * P, gr_csr_mat_t LU, gr_ptr den, const gr_csr_mat_t A, int rank_check, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve_fflu(gr_csr_mat_t X, const gr_csr_mat_t A, const gr_csr_mat_t B, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve_lu(gr_csr_mat_t X, const gr_csr_mat_t A, const gr_csr_mat_t B, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve(gr_csr_mat_t X, const gr_csr_mat_t A, const gr_csr_mat_t B, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve_fflu_precomp(gr_csr_mat_t X, const slong * perm, const gr_csr_mat_t A, const gr_csr_mat_t B, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve_lu_precomp(gr_csr_mat_t X, const slong * perm, const gr_csr_mat_t A, const gr_csr_mat_t B, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve_den_fflu(gr_csr_mat_t X, gr_ptr den, const gr_csr_mat_t A, const gr_csr_mat_t B, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve_den(gr_csr_mat_t X, gr_ptr den, const gr_csr_mat_t A, const gr_csr_mat_t B, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_solve_field(gr_csr_mat_t X, const gr_csr_mat_t A, const gr_csr_mat_t B, gr_ctx_t ctx);
+*/
+/*
+WARN_UNUSED_RESULT int gr_sparse_mat_det_berkowitz(gr_ptr res, const gr_sparse_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_det_fflu(gr_ptr res, const gr_sparse_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_det_lu(gr_ptr res, const gr_sparse_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_det_cofactor(gr_ptr res, const gr_sparse_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_det_generic_field(gr_ptr res, const gr_sparse_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_det_generic_integral_domain(gr_ptr res, const gr_sparse_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_det_generic(gr_ptr res, const gr_sparse_mat_t A, gr_ctx_t ctx);
+*/
+/*
+WARN_UNUSED_RESULT int gr_sparse_mat_det(gr_ptr res, const gr_csr_mat_t A, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_inv(gr_csr_mat_t res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_adjugate_charpoly(gr_csr_mat_t adj, gr_ptr det, const gr_csr_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_adjugate_cofactor(gr_csr_mat_t adj, gr_ptr det, const gr_csr_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_adjugate(gr_csr_mat_t adj, gr_ptr det, const gr_csr_mat_t A, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_rank_lu(slong * rank, const gr_csr_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_rank_fflu(slong * rank, const gr_csr_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_rank(slong * rank, const gr_csr_mat_t A, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_rref_lu(slong * res_rank, gr_csr_mat_t R, const gr_csr_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_rref_fflu(slong * res_rank, gr_csr_mat_t R, const gr_csr_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_rref(slong * res_rank, gr_csr_mat_t R, const gr_csr_mat_t A, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_rref_den_fflu(slong * res_rank, gr_csr_mat_t R, gr_ptr den, const gr_csr_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_rref_den(slong * res_rank, gr_csr_mat_t R, gr_ptr den, const gr_csr_mat_t A, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_nullspace(gr_csr_mat_t X, const gr_csr_mat_t A, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_ones(gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_pascal(gr_csr_mat_t mat, int triangular, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_stirling(gr_csr_mat_t mat, int kind, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_hilbert(gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_hadamard(gr_csr_mat_t mat, gr_ctx_t ctx);
+*/
+/* todo: dft, dct */
+/*
+WARN_UNUSED_RESULT int gr_sparse_mat_transpose(gr_csr_mat_t B, const gr_csr_mat_t A, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve_tril_classical(gr_csr_mat_t X, const gr_csr_mat_t L, const gr_csr_mat_t B, int unit, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve_tril_recursive(gr_csr_mat_t X, const gr_csr_mat_t L, const gr_csr_mat_t B, int unit, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve_tril(gr_csr_mat_t X, const gr_csr_mat_t L, const gr_csr_mat_t B, int unit, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve_triu_classical(gr_csr_mat_t X, const gr_csr_mat_t U, const gr_csr_mat_t B, int unit, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve_triu_recursive(gr_csr_mat_t X, const gr_csr_mat_t U, const gr_csr_mat_t B, int unit, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_nonsingular_solve_triu(gr_csr_mat_t X, const gr_csr_mat_t U, const gr_csr_mat_t B, int unit, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_trace(gr_ptr res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int _gr_sparse_mat_charpoly_berkowitz(gr_ptr res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_charpoly_berkowitz(gr_poly_t res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int _gr_sparse_mat_charpoly_danilevsky_inplace(gr_ptr res, gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int _gr_sparse_mat_charpoly_danilevsky(gr_ptr res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_charpoly_danilevsky(gr_poly_t res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int _gr_sparse_mat_charpoly_faddeev(gr_ptr res, gr_csr_mat_t adj, const gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_charpoly_faddeev(gr_poly_t res, gr_csr_mat_t adj, const gr_csr_mat_t mat, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int _gr_sparse_mat_charpoly_faddeev_bsgs(gr_ptr res, gr_csr_mat_t adj, const gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_charpoly_faddeev_bsgs(gr_poly_t res, gr_csr_mat_t adj, const gr_csr_mat_t mat, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int _gr_sparse_mat_charpoly_from_hessenberg(gr_ptr res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_charpoly_from_hessenberg(gr_poly_t cp, const gr_csr_mat_t mat, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int _gr_sparse_mat_charpoly_gauss(gr_ptr res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_charpoly_gauss(gr_poly_t cp, const gr_csr_mat_t mat, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int _gr_sparse_mat_charpoly_householder(gr_ptr res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_charpoly_householder(gr_poly_t cp, const gr_csr_mat_t mat, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int _gr_sparse_mat_charpoly(gr_ptr res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_charpoly(gr_poly_t res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_hessenberg(gr_csr_mat_t res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_hessenberg_gauss(gr_csr_mat_t res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_hessenberg_householder(gr_csr_mat_t res, const gr_csr_mat_t mat, gr_ctx_t ctx);
+truth_t gr_sparse_mat_is_hessenberg(const gr_csr_mat_t mat, gr_ctx_t ctx);
+
+int gr_sparse_mat_reduce_row(slong * column, gr_csr_mat_t A, slong * P, slong * L, slong m, gr_ctx_t ctx);
+int gr_sparse_mat_apply_row_similarity(gr_csr_mat_t A, slong r, gr_ptr d, gr_ctx_t ctx);
+int gr_sparse_mat_minpoly_field(gr_poly_t p, const gr_csr_mat_t X, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_eigenvalues(gr_vec_t lambda, gr_vec_t mult, const gr_csr_mat_t mat, int flags, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_eigenvalues_other(gr_vec_t lambda, gr_vec_t mult, const gr_csr_mat_t mat, gr_ctx_t mat_ctx, int flags, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_diagonalization_precomp(gr_vec_t D, gr_csr_mat_t L, gr_csr_mat_t R, const gr_csr_mat_t A, const gr_vec_t eigenvalues, const gr_vec_t mult, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_diagonalization_generic(gr_vec_t D, gr_csr_mat_t L, gr_csr_mat_t R, const gr_csr_mat_t A, int flags, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_diagonalization(gr_vec_t D, gr_csr_mat_t L, gr_csr_mat_t R, const gr_csr_mat_t A, int flags, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_set_jordan_blocks(gr_csr_mat_t mat, const gr_vec_t lambda, slong num_blocks, slong * block_lambda, slong * block_size, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_jordan_blocks(gr_vec_t lambda, slong * num_blocks, slong * block_lambda, slong * block_size, const gr_csr_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_jordan_transformation(gr_csr_mat_t mat, const gr_vec_t lambda, slong num_blocks, slong * block_lambda, slong * block_size, const gr_csr_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_jordan_form(gr_csr_mat_t J, gr_csr_mat_t P, const gr_csr_mat_t A, gr_ctx_t ctx);
+
+truth_t gr_sparse_mat_is_scalar(const gr_csr_mat_t mat, gr_ctx_t ctx);
+truth_t gr_sparse_mat_is_diagonal(const gr_csr_mat_t mat, gr_ctx_t ctx);
+truth_t gr_sparse_mat_is_lower_triangular(const gr_csr_mat_t mat, gr_ctx_t ctx);
+truth_t gr_sparse_mat_is_upper_triangular(const gr_csr_mat_t mat, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_mul_diag(gr_csr_mat_t C, const gr_csr_mat_t A, const gr_vec_t D, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_diag_mul(gr_csr_mat_t C, const gr_vec_t D, const gr_csr_mat_t A, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_exp_jordan(gr_csr_mat_t res, const gr_csr_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_exp(gr_csr_mat_t res, const gr_csr_mat_t A, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_mat_log_jordan(gr_csr_mat_t res, const gr_csr_mat_t A, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_mat_log(gr_csr_mat_t res, const gr_csr_mat_t A, gr_ctx_t ctx);
+*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gr_sparse_mat/arith.c b/src/gr_sparse_mat/arith.c
new file mode 100644
index 0000000000..8dae66a5be
--- /dev/null
+++ b/src/gr_sparse_mat/arith.c
@@ -0,0 +1,69 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+ Copyright (C) 2023 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <string.h>
+#include "gr_sparse_mat.h"
+
+#define GR_LIL_MAT_BOP(FUNC, RES, MAT1, MAT2, CTX) \
+{ \
+    slong row; \
+    int status = GR_SUCCESS; \
+ \
+    if (gr_mat_is_compatible(RES, MAT1, CTX) == T_FALSE || gr_mat_is_compatible(RES, MAT2, CTX) == T_FALSE) \
+    { \
+        return GR_DOMAIN; \
+    } \
+    (RES)->nnz = 0; \
+    for (row = 0; row < (RES)->r; row++) \
+    { \
+        status |= FUNC(&(RES)->rows[row], &(MAT1)->rows[row], &(MAT2)->rows[row], CTX); \
+        (RES)->nnz += (RES)->rows[row].nnz; \
+    } \
+    return status; \
+}
+
+int gr_lil_mat_add(gr_lil_mat_t dst, const gr_lil_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx)
+{ GR_LIL_MAT_BOP(gr_sparse_vec_add, dst, mat1, mat2, ctx) }
+
+int gr_lil_mat_sub(gr_lil_mat_t dst, const gr_lil_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx)
+{ GR_LIL_MAT_BOP(gr_sparse_vec_sub, dst, mat1, mat2, ctx) }
+
+int gr_lil_mat_mul(gr_lil_mat_t dst, const gr_lil_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx)
+{ GR_LIL_MAT_BOP(gr_sparse_vec_mul, dst, mat1, mat2, ctx) }
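+
+/* Illustrative usage (a sketch, not part of the patch's tests): with A and
+   B initialized gr_lil_mat_t values of equal shape over ctx,
+
+       gr_lil_mat_t C;
+       gr_lil_mat_init(C, A->r, A->c, ctx);
+       status |= gr_lil_mat_add(C, A, B, ctx);   // C = A + B, row by row
+
+   each wrapper delegates to the matching gr_sparse_vec operation on the
+   row lists and re-totals nnz. */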
+
+#define GR_LIL_MAT_ACCUM_OP(FUNC, RES, MAT, C, CTX) \
+{ \
+    slong row; \
+ int status = GR_SUCCESS; \
+ \
+ if (gr_mat_is_compatible(RES, MAT, CTX) == T_FALSE) \
+ { \
+ return GR_DOMAIN; \
+ } \
+ (RES)->nnz = 0; \
+ for (row = 0; row < (RES)->r; row++) \
+ { \
+ status |= FUNC(&(RES)->rows[row], &(MAT)->rows[row], C, CTX); \
+ (RES)->nnz += (RES)->rows[row].nnz; \
+ } \
+ return status; \
+}
+
+int gr_lil_mat_addmul_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{
+ GR_LIL_MAT_ACCUM_OP(gr_sparse_vec_addmul_scalar, dst, src, c, ctx)
+}
+
+int gr_lil_mat_submul_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{
+ GR_LIL_MAT_ACCUM_OP(gr_sparse_vec_submul_scalar, dst, src, c, ctx)
+}
diff --git a/src/gr_sparse_mat/arith_dense.c b/src/gr_sparse_mat/arith_dense.c
new file mode 100644
index 0000000000..6947ce5eb0
--- /dev/null
+++ b/src/gr_sparse_mat/arith_dense.c
@@ -0,0 +1,134 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+ Copyright (C) 2023 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <string.h>
+#include "gr_sparse_mat.h"
+
+
+WARN_UNUSED_RESULT int gr_mat_update_lil_mat_nz(gr_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx)
+{
+ slong row;
+ int success = GR_SUCCESS;
+ if (gr_mat_is_compatible(dst, src, ctx) != T_TRUE)
+ return GR_DOMAIN;
+ for (row = 0; row < src->r; ++row)
+ success |= gr_vec_update_sparse_vec_nz(dst->rows[row], &src->rows[row], ctx);
+ return success;
+}
+
+WARN_UNUSED_RESULT int gr_mat_add_lil_mat(gr_mat_t dst, const gr_mat_t src1, const gr_lil_mat_t src2, gr_ctx_t ctx)
+{
+ slong row;
+ int success = GR_SUCCESS;
+ if (gr_mat_is_compatible(dst, src1, ctx) != T_TRUE || gr_mat_is_compatible(dst, src2, ctx) != T_TRUE)
+ return GR_DOMAIN;
+ for (row = 0; row < src1->r; ++row)
+ success |= gr_vec_add_sparse_vec(dst->rows[row], src1->rows[row], &src2->rows[row], ctx);
+ return success;
+}
+
+WARN_UNUSED_RESULT int gr_mat_sub_lil_mat(gr_mat_t dst, const gr_mat_t src1, const gr_lil_mat_t src2, gr_ctx_t ctx)
+{
+ slong row;
+ int success = GR_SUCCESS;
+ if (gr_mat_is_compatible(dst, src1, ctx) != T_TRUE || gr_mat_is_compatible(dst, src2, ctx) != T_TRUE)
+ return GR_DOMAIN;
+ for (row = 0; row < src1->r; ++row)
+ success |= gr_vec_sub_sparse_vec(dst->rows[row], src1->rows[row], &src2->rows[row], ctx);
+ return success;
+}
+
+WARN_UNUSED_RESULT int gr_mat_mul_lil_mat_nz(gr_mat_t dst, const gr_mat_t src1, const gr_lil_mat_t src2, gr_ctx_t ctx)
+{
+ slong row;
+ int success = GR_SUCCESS;
+ if (gr_mat_is_compatible(dst, src1, ctx) != T_TRUE || gr_mat_is_compatible(dst, src2, ctx) != T_TRUE)
+ return GR_DOMAIN;
+ for (row = 0; row < src1->r; ++row)
+ success |= gr_vec_mul_sparse_vec_nz(dst->rows[row], src1->rows[row], &src2->rows[row], ctx);
+ return success;
+}
+
+WARN_UNUSED_RESULT int gr_mat_div_lil_mat_nz(gr_mat_t dst, const gr_mat_t src1, const gr_lil_mat_t src2, gr_ctx_t ctx)
+{
+ slong row;
+ int success = GR_SUCCESS;
+ if (gr_mat_is_compatible(dst, src1, ctx) != T_TRUE || gr_mat_is_compatible(dst, src2, ctx) != T_TRUE)
+ return GR_DOMAIN;
+ for (row = 0; row < src1->r; ++row)
+ success |= gr_vec_div_sparse_vec_nz(dst->rows[row], src1->rows[row], &src2->rows[row], ctx);
+ return success;
+}
+
+WARN_UNUSED_RESULT int gr_mat_addmul_lil_mat_scalar(gr_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{
+ slong row;
+ int success = GR_SUCCESS;
+ if (gr_mat_is_compatible(dst, src, ctx) != T_TRUE)
+ return GR_DOMAIN;
+ for (row = 0; row < src->r; ++row)
+ success |= gr_vec_addmul_sparse_vec_scalar(dst->rows[row], &src->rows[row], c, ctx);
+ return success;
+}
+
+WARN_UNUSED_RESULT int gr_mat_submul_lil_mat_scalar(gr_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{
+ slong row;
+ int success = GR_SUCCESS;
+ if (gr_mat_is_compatible(dst, src, ctx) != T_TRUE)
+ return GR_DOMAIN;
+ for (row = 0; row < src->r; ++row)
+ success |= gr_vec_submul_sparse_vec_scalar(dst->rows[row], &src->rows[row], c, ctx);
+ return success;
+}
+
+WARN_UNUSED_RESULT int gr_mat_addmul_lil_mat_scalar_si(gr_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx)
+{
+ slong row;
+ int success = GR_SUCCESS;
+ if (gr_mat_is_compatible(dst, src, ctx) != T_TRUE)
+ return GR_DOMAIN;
+ for (row = 0; row < src->r; ++row)
+ success |= gr_vec_addmul_sparse_vec_scalar_si(dst->rows[row], &src->rows[row], c, ctx);
+ return success;
+}
+
+WARN_UNUSED_RESULT int gr_mat_submul_lil_mat_scalar_si(gr_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx)
+{
+ slong row;
+ int success = GR_SUCCESS;
+ if (gr_mat_is_compatible(dst, src, ctx) != T_TRUE)
+ return GR_DOMAIN;
+ for (row = 0; row < src->r; ++row)
+ success |= gr_vec_submul_sparse_vec_scalar_si(dst->rows[row], &src->rows[row], c, ctx);
+ return success;
+}
+
+WARN_UNUSED_RESULT int gr_mat_addmul_lil_mat_scalar_fmpz(gr_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+{
+ slong row;
+ int success = GR_SUCCESS;
+ if (gr_mat_is_compatible(dst, src, ctx) != T_TRUE)
+ return GR_DOMAIN;
+ for (row = 0; row < src->r; ++row)
+ success |= gr_vec_addmul_sparse_vec_scalar_fmpz(dst->rows[row], &src->rows[row], c, ctx);
+ return success;
+}
+
+WARN_UNUSED_RESULT int gr_mat_submul_lil_mat_scalar_fmpz(gr_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+{
+ slong row;
+ int success = GR_SUCCESS;
+ if (gr_mat_is_compatible(dst, src, ctx) != T_TRUE)
+ return GR_DOMAIN;
+ for (row = 0; row < src->r; ++row)
+ success |= gr_vec_submul_sparse_vec_scalar_fmpz(dst->rows[row], &src->rows[row], c, ctx);
+ return success;
+}
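+
+/* Illustrative usage (sketch): accumulating a sparse update into a dense
+   matrix D of the same shape as the gr_lil_mat_t S,
+
+       status |= gr_mat_addmul_lil_mat_scalar_si(D, S, 2, ctx);   // D += 2*S
+
+   only positions holding nonzeros of S are touched, so the cost is
+   O(nnz(S)) ring operations rather than O(r*c). */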
diff --git a/src/gr_sparse_mat/canonicalize.c b/src/gr_sparse_mat/canonicalize.c
new file mode 100644
index 0000000000..e4c3801059
--- /dev/null
+++ b/src/gr_sparse_mat/canonicalize.c
@@ -0,0 +1,138 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdlib.h>
+#include "gr_sparse_mat.h"
+
+typedef struct
+{
+ slong i;
+ slong row;
+ slong col;
+}
+sparse_mat_index_t;
+
+static int sparse_mat_index_cmp(const void* a, const void* b)
+{
+ slong arow = ((sparse_mat_index_t*)(a))->row;
+ slong brow = ((sparse_mat_index_t*)(b))->row;
+ slong acol = ((sparse_mat_index_t*)(a))->col;
+ slong bcol = ((sparse_mat_index_t*)(b))->col;
+ return (arow < brow ? -1 : (arow > brow ? 1 : (acol < bcol ? -1 : (acol > bcol ? 1 : 0))));
+}
+
+static sparse_mat_index_t * _sort_coords(ulong * rows, ulong * cols, slong num)
+{
+ slong i;
+ sparse_mat_index_t * si;
+
+ si = flint_malloc(num * sizeof(sparse_mat_index_t));
+ for (i = 0; i < num; i++)
+ {
+ si[i].i = i;
+ si[i].row = rows[i];
+ si[i].col = cols[i];
+ }
+
+ qsort(si, num, sizeof(sparse_mat_index_t), sparse_mat_index_cmp);
+ return si;
+}
+
+truth_t gr_coo_mat_is_canonical(gr_coo_mat_t mat, gr_ctx_t ctx)
+{
+ slong i, sz;
+ gr_method_unary_predicate is_zero = GR_UNARY_PREDICATE(ctx, IS_ZERO);
+ sz = ctx->sizeof_elem;
+
+ for (i = 0; i < mat->nnz; ++i)
+ {
+ // Check that indices are unique and in order
+ if (i > 0 && (mat->rows[i] < mat->rows[i-1] || (mat->rows[i] == mat->rows[i-1] && mat->cols[i] <= mat->cols[i-1])))
+ return T_FALSE;
+
+ // Check that entries are not known to be zero
+ if (is_zero(GR_ENTRY(mat->nzs, i, sz), ctx) == T_TRUE)
+ return T_FALSE;
+ }
+ return T_TRUE;
+}
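+
+/* Canonical form means the stored (row, col) pairs are strictly increasing
+   in row-major order and no stored value is known to be zero: entries at
+   (0,1), (0,3), (2,0) are canonical; a repeated (0,3) or a stored zero is
+   not. */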
+
+int gr_coo_mat_canonicalize(gr_coo_mat_t mat, gr_ctx_t ctx)
+{
+ slong i, j, k, sz, nnz;
+ int status = GR_SUCCESS;
+ sparse_mat_index_t *si;
+ ulong *inv_si;
+ gr_ptr entry;
+ gr_method_unary_predicate is_zero = GR_UNARY_PREDICATE(ctx, IS_ZERO);
+
+ if (mat->is_canonical == T_TRUE)
+ return GR_SUCCESS;
+
+ sz = ctx->sizeof_elem;
+
+ //gr_coo_mat_print_nz(mat, ctx);
+
+    // Get sorted order for matrix indices (and inverse mapping)
+ si = _sort_coords(mat->rows, mat->cols, mat->nnz);
+ inv_si = flint_malloc(mat->nnz * sizeof(ulong));
+ for (i = 0; i < mat->nnz; ++i)
+ inv_si[si[i].i] = i;
+
+ // Use swaps to apply sort to entries
+ for (i = 0; i < mat->nnz; ++i)
+ {
+ j = si[i].i;
+ if (i != j)
+ {
+ FLINT_SWAP(ulong, mat->rows[i], mat->rows[j]);
+ FLINT_SWAP(ulong, mat->cols[i], mat->cols[j]);
+ gr_swap(GR_ENTRY(mat->nzs, i, sz), GR_ENTRY(mat->nzs, j, sz), ctx);
+
+ // Fix mappings to remove i from permutation
+ k = inv_si[i];
+ si[k].i = j;
+ inv_si[j] = k;
+ }
+ }
+ flint_free(si);
+ flint_free(inv_si);
+ //gr_coo_mat_print_nz(mat, ctx);
+
+ // Compress duplicated entries
+ nnz = 0;
+ entry = NULL;
+ for (i = 0; i < mat->nnz; ++i)
+ {
+        if (i > 0 && mat->rows[i-1] == mat->rows[i] && mat->cols[i-1] == mat->cols[i])
+ status |= gr_add(entry, entry, GR_ENTRY(mat->nzs, i, sz), ctx);
+ else
+ {
+ // If previous entry does not exist or is not zero, advance to the next one
+ if (entry == NULL || is_zero(entry, ctx) != T_TRUE)
+ {
+ entry = GR_ENTRY(mat->nzs, nnz, sz);
+ ++nnz;
+ }
+ mat->rows[nnz-1] = mat->rows[i];
+ mat->cols[nnz-1] = mat->cols[i];
+ status |= gr_set(entry, GR_ENTRY(mat->nzs, i, sz), ctx);
+ }
+ }
+ if (entry != NULL && is_zero(entry, ctx) == T_TRUE)
+ --nnz;
+ mat->nnz = nnz;
+ //gr_coo_mat_print_nz(mat, ctx);
+ mat->is_canonical = gr_coo_mat_is_canonical(mat, ctx);
+ return status;
+}
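+
+/* Illustrative usage (sketch): assembling a matrix from unsorted
+   coordinates with a duplicate, then canonicalizing so that later lookups
+   can binary search; A is an initialized gr_coo_mat_t and vals points to
+   four initialized ring elements:
+
+       ulong rows[4] = {2, 0, 0, 2}, cols[4] = {1, 3, 3, 1};
+       status |= gr_coo_mat_from_entries(A, rows, cols, vals, 4, T_FALSE, ctx);
+       status |= gr_coo_mat_canonicalize(A, ctx);   // sort, sum dups, drop zeros
+*/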
+
+
diff --git a/src/gr_sparse_mat/equal.c b/src/gr_sparse_mat/equal.c
new file mode 100644
index 0000000000..500e4802a8
--- /dev/null
+++ b/src/gr_sparse_mat/equal.c
@@ -0,0 +1,97 @@
+/*
+ Copyright (C) 2022 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_sparse_mat.h"
+
+truth_t
+gr_csr_mat_equal(const gr_csr_mat_t mat1, const gr_csr_mat_t mat2, gr_ctx_t ctx)
+{
+    if (gr_mat_is_compatible(mat1, mat2, ctx) == T_FALSE ||
+        mat1->nnz != mat2->nnz ||
+        memcmp(mat1->rows, mat2->rows, (mat1->r + 1) * sizeof(ulong)) != 0 ||
+        memcmp(mat1->cols, mat2->cols, mat1->nnz * sizeof(ulong)) != 0)
+    {
+        return T_FALSE;
+    }
+ return _gr_vec_equal(mat1->nzs, mat2->nzs, mat1->nnz, ctx);
+}
+
+truth_t
+gr_lil_mat_equal(const gr_lil_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx)
+{
+ slong row;
+ truth_t row_is_eq;
+ truth_t ret = T_TRUE;
+
+ if (gr_mat_is_compatible(mat1, mat2, ctx) == T_FALSE || mat1->nnz != mat2->nnz)
+ {
+ return T_FALSE;
+ }
+ for (row = 0; row < mat1->r; row++)
+ {
+ row_is_eq = gr_sparse_vec_equal(&mat1->rows[row], &mat2->rows[row], ctx);
+ if (row_is_eq == T_FALSE)
+ return T_FALSE;
+ else if (row_is_eq == T_UNKNOWN)
+ ret = T_UNKNOWN;
+ }
+ return ret;
+}
+
+truth_t gr_coo_mat_equal(const gr_coo_mat_t mat1, const gr_coo_mat_t mat2, gr_ctx_t ctx)
+{
+ if (gr_mat_is_compatible(mat1, mat2, ctx) == T_FALSE)
+ {
+ return T_FALSE;
+ }
+ if (mat1->is_canonical == T_FALSE || mat2->is_canonical == T_FALSE)
+ {
+ return T_UNKNOWN;
+ }
+    if (mat1->nnz != mat2->nnz ||
+        memcmp(mat1->rows, mat2->rows, mat1->nnz * sizeof(ulong)) != 0 ||
+        memcmp(mat1->cols, mat2->cols, mat1->nnz * sizeof(ulong)) != 0)
+    {
+        return T_FALSE;
+    }
+ return _gr_vec_equal(mat1->nzs, mat2->nzs, mat1->nnz, ctx);
+}
+
+truth_t
+gr_csr_mat_equal_lil_mat(const gr_csr_mat_t mat1, const gr_lil_mat_t mat2, gr_ctx_t ctx)
+{
+ slong row;
+ gr_sparse_vec_t tmp;
+ truth_t row_is_eq;
+ truth_t ret = T_TRUE;
+
+ if (gr_mat_is_compatible(mat1, mat2, ctx) == T_FALSE || mat1->nnz != mat2->nnz)
+ {
+ return T_FALSE;
+ }
+ for (row = 0; row < mat1->r; row++)
+ {
+ _gr_csr_mat_borrow_row(tmp, mat1, row, ctx);
+ row_is_eq = gr_sparse_vec_equal(tmp, &mat2->rows[row], ctx);
+ if (row_is_eq == T_FALSE)
+ return T_FALSE;
+ else if (row_is_eq == T_UNKNOWN)
+ ret = T_UNKNOWN;
+ }
+ return ret;
+}
+
diff --git a/src/gr_sparse_mat/fit_nnz.c b/src/gr_sparse_mat/fit_nnz.c
new file mode 100644
index 0000000000..45fc747371
--- /dev/null
+++ b/src/gr_sparse_mat/fit_nnz.c
@@ -0,0 +1,93 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_sparse_mat.h"
+
+void
+gr_csr_mat_fit_nnz(gr_csr_mat_t mat, slong nnz, gr_ctx_t ctx)
+{
+ slong alloc = mat->alloc;
+ slong new_alloc = nnz;
+ if (new_alloc > alloc)
+ {
+ slong sz = ctx->sizeof_elem;
+ mat->cols = flint_realloc(mat->cols, new_alloc * sizeof(ulong));
+ mat->nzs = flint_realloc(mat->nzs, new_alloc * sz);
+ _gr_vec_init(GR_ENTRY(mat->nzs, alloc, sz), new_alloc - alloc, ctx);
+ mat->alloc = new_alloc;
+ }
+}
+
+void
+gr_lil_mat_fit_nnz(gr_lil_mat_t mat, slong *nnz, gr_ctx_t ctx)
+{
+    slong row;
+
+ for (row = 0; row < mat->r; ++row)
+ gr_sparse_vec_fit_nnz(&mat->rows[row], nnz[row], ctx);
+}
+
+void
+gr_coo_mat_fit_nnz(gr_coo_mat_t mat, slong nnz, gr_ctx_t ctx)
+{
+ slong alloc = mat->alloc;
+ slong new_alloc = nnz;
+ if (new_alloc > alloc)
+ {
+ slong sz = ctx->sizeof_elem;
+ mat->rows = flint_realloc(mat->rows, new_alloc * sizeof(ulong));
+ mat->cols = flint_realloc(mat->cols, new_alloc * sizeof(ulong));
+ mat->nzs = flint_realloc(mat->nzs, new_alloc * sz);
+ _gr_vec_init(GR_ENTRY(mat->nzs, alloc, sz), new_alloc - alloc, ctx);
+ mat->alloc = new_alloc;
+ }
+}
+
+void
+gr_csr_mat_shrink_to_nnz(gr_csr_mat_t mat, gr_ctx_t ctx)
+{
+ slong nnz = mat->nnz;
+ slong sz = ctx->sizeof_elem;
+ if (mat->alloc > nnz)
+ {
+ mat->cols = flint_realloc(mat->cols, nnz * sizeof(ulong));
+ _gr_vec_clear(GR_ENTRY(mat->nzs, nnz, sz), mat->alloc - nnz, ctx);
+ mat->nzs = flint_realloc(mat->nzs, nnz * sz);
+ mat->alloc = nnz;
+ }
+}
+
+void
+gr_lil_mat_shrink_to_nnz(gr_lil_mat_t mat, gr_ctx_t ctx)
+{
+ slong row;
+
+ for (row = 0; row < mat->r; ++row)
+ {
+ gr_sparse_vec_shrink_to_nnz(&mat->rows[row], ctx);
+ }
+}
+
+void
+gr_coo_mat_shrink_to_nnz(gr_coo_mat_t mat, gr_ctx_t ctx)
+{
+ slong nnz = mat->nnz;
+ slong sz = ctx->sizeof_elem;
+ if (mat->alloc > nnz)
+ {
+        mat->rows = flint_realloc(mat->rows, nnz * sizeof(ulong));
+ mat->cols = flint_realloc(mat->cols, nnz * sizeof(ulong));
+ _gr_vec_clear(GR_ENTRY(mat->nzs, nnz, sz), mat->alloc - nnz, ctx);
+ mat->nzs = flint_realloc(mat->nzs, nnz * sz);
+ mat->alloc = nnz;
+ }
+}
+
diff --git a/src/gr_sparse_mat/from_entries.c b/src/gr_sparse_mat/from_entries.c
new file mode 100644
index 0000000000..3a8f4d6bcc
--- /dev/null
+++ b/src/gr_sparse_mat/from_entries.c
@@ -0,0 +1,29 @@
+#include <string.h>
+#include "gr_sparse_mat.h"
+
+static int check_coords(ulong * rows, ulong * cols, slong num, slong r, slong c)
+{
+ slong i;
+
+ for (i = 0; i < num; ++i)
+ if (rows[i] >= r || cols[i] >= c)
+ return 0;
+ return 1;
+}
+
+int gr_coo_mat_from_entries(gr_coo_mat_t mat, ulong *rows, ulong *cols, gr_srcptr entries, slong nnz, truth_t is_canonical, gr_ctx_t ctx)
+{
+ int status;
+
+ if (!check_coords(rows, cols, nnz, mat->r, mat->c))
+ return GR_DOMAIN;
+
+ gr_coo_mat_fit_nnz(mat, nnz, ctx);
+ memcpy(mat->rows, rows, nnz * sizeof(ulong));
+ memcpy(mat->cols, cols, nnz * sizeof(ulong));
+ status = _gr_vec_set(mat->nzs, entries, nnz, ctx);
+ mat->nnz = nnz;
+ mat->is_canonical = is_canonical;
+ return status;
+}
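+
+/* Illustrative usage (sketch): when the caller already supplies sorted,
+   duplicate-free coordinates, passing T_TRUE records the matrix as
+   canonical and skips any later canonicalization pass:
+
+       ulong rows[2] = {0, 1}, cols[2] = {2, 0};
+       status |= gr_coo_mat_from_entries(mat, rows, cols, vals, 2, T_TRUE, ctx);
+
+   out-of-range coordinates make the call return GR_DOMAIN with mat
+   untouched. */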
+
diff --git a/src/gr_sparse_mat/get_entry.c b/src/gr_sparse_mat/get_entry.c
new file mode 100644
index 0000000000..06aa605083
--- /dev/null
+++ b/src/gr_sparse_mat/get_entry.c
@@ -0,0 +1,95 @@
+#include <stdlib.h>
+#include "gr_sparse_mat.h"
+
+
+static int ulong_cmp(const void* a, const void* b)
+{
+ ulong av = *((ulong*)(a));
+ ulong bv = *((ulong*)(b));
+ return (av < bv ? -1 : (av > bv ? 1 : 0));
+}
+
+gr_ptr gr_csr_mat_find_entry(gr_csr_mat_t mat, slong row, slong col, gr_ctx_t ctx)
+{
+ slong sz = ctx->sizeof_elem;
+ ulong* bs = NULL;
+
+ if (row < 0 || row >= mat->r || col < 0 || col >= mat->c)
+ return NULL;
+
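+    // Canonical CSR keeps each row's column indices sorted, so the entry
+    // can be located by binary search over the row's slice of cols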
+ bs = bsearch(&col, mat->cols + mat->rows[row], mat->rows[row+1] - mat->rows[row], sizeof(ulong), ulong_cmp);
+
+ if (bs == NULL)
+ return NULL;
+ return GR_ENTRY(mat->nzs, bs - mat->cols, sz);
+}
+
+gr_ptr
+gr_lil_mat_find_entry(gr_lil_mat_t mat, slong row, slong col, gr_ctx_t ctx)
+{
+ if (row < 0 || row >= mat->r)
+ return NULL;
+
+ return gr_sparse_vec_find_entry(&mat->rows[row], col, ctx);
+}
+
+gr_ptr gr_coo_mat_find_entry(gr_coo_mat_t mat, slong row, slong col, gr_ctx_t ctx)
+{
+ slong i, lower, upper;
+ slong sz = ctx->sizeof_elem;
+
+ if (row < 0 || row >= mat->r || col < 0 || col >= mat->c)
+ return NULL;
+
+    if (mat->is_canonical == T_TRUE)
+    {
+        // Find using (manual) binary search
+        lower = 0;
+        upper = mat->nnz - 1;
+ while (lower <= upper)
+ {
+ i = (lower + upper)/2;
+ if (mat->rows[i] < row || (mat->rows[i] == row && mat->cols[i] < col))
+ lower = i + 1;
+ else if (mat->rows[i] > row || (mat->rows[i] == row && mat->cols[i] > col))
+ upper = i - 1;
+ else
+ return GR_ENTRY(mat->nzs, i, sz);
+ }
+ }
+ else
+ {
+ // Exhaust to find first instance
+ for (i = 0; i < mat->nnz; ++i)
+ if (mat->rows[i] == row && mat->cols[i] == col)
+ return GR_ENTRY(mat->nzs, i, sz);
+ }
+ return NULL;
+}
+
+int gr_csr_mat_get_entry(gr_ptr res, gr_csr_mat_t mat, slong row, slong col, gr_ctx_t ctx)
+{
+ gr_ptr ptr = gr_csr_mat_find_entry(mat, row, col, ctx);
+
+ if (ptr == NULL)
+ return gr_zero(res, ctx);
+ return gr_set(res, ptr, ctx);
+}
+
+int gr_lil_mat_get_entry(gr_ptr res, gr_lil_mat_t mat, slong row, slong col, gr_ctx_t ctx)
+{
+ gr_ptr ptr = gr_lil_mat_find_entry(mat, row, col, ctx);
+
+ if (ptr == NULL)
+ return gr_zero(res, ctx);
+ return gr_set(res, ptr, ctx);
+}
+
+int gr_coo_mat_get_entry(gr_ptr res, gr_coo_mat_t mat, slong row, slong col, gr_ctx_t ctx)
+{
+ gr_ptr ptr = gr_coo_mat_find_entry(mat, row, col, ctx);
+
+ if (ptr == NULL)
+ return gr_zero(res, ctx);
+ return gr_set(res, ptr, ctx);
+}
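+
+/* Illustrative usage (sketch): reading an arbitrary position; absent
+   positions come back as the ring's zero rather than as an error:
+
+       gr_ptr t;
+       GR_TMP_INIT(t, ctx);
+       status |= gr_csr_mat_get_entry(t, mat, 3, 7, ctx);
+       GR_TMP_CLEAR(t, ctx);
+*/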
diff --git a/src/gr_sparse_mat/invert_rows.c b/src/gr_sparse_mat/invert_rows.c
new file mode 100644
index 0000000000..551e82765d
--- /dev/null
+++ b/src/gr_sparse_mat/invert_rows.c
@@ -0,0 +1,70 @@
+/*
+ Copyright (C) 2022 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_sparse_mat.h"
+
+int gr_csr_mat_invert_rows(gr_csr_mat_t mat, slong * perm, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong r = mat->r;
+ slong nnz = mat->nnz;
+ slong i, j, k;
+ slong sz = ctx->sizeof_elem;
+
+ // Handle permutation if provided
+ if (perm != NULL)
+ {
+ for (i = 0; i < r / 2; i++)
+ {
+ FLINT_SWAP(slong, perm[i], perm[r - i - 1]);
+ }
+ }
+
+ // Reverse row offsets
+ for (i = 0; i < r / 2; ++i)
+ {
+ FLINT_SWAP(ulong, mat->rows[i + 1], mat->rows[r - i]);
+ }
+
+ // Reverse all columns and elements
+ for (j = 0; j < nnz / 2; ++j)
+ {
+ k = nnz - j - 1;
+ FLINT_SWAP(ulong, mat->cols[j], mat->cols[k]);
+ gr_swap(GR_ENTRY(mat->nzs, j, sz), GR_ENTRY(mat->nzs, k, sz), ctx);
+ }
+
+ // Reverse columns and elements in each row
+ for (i = 0; i < r; ++i)
+ {
+ // Fix row offset
+ mat->rows[i+1] -= (i < r - 1 ? mat->rows[i+2] : 0) - mat->rows[i];
+        for (j = mat->rows[i]; j < (mat->rows[i] + mat->rows[i+1]) / 2; ++j)
+        {
+            k = mat->rows[i] + mat->rows[i+1] - j - 1;
+ FLINT_SWAP(ulong, mat->cols[j], mat->cols[k]);
+ gr_swap(GR_ENTRY(mat->nzs, j, sz), GR_ENTRY(mat->nzs, k, sz), ctx);
+ }
+ }
+ return status;
+}
+
+int gr_lil_mat_invert_rows(gr_lil_mat_t mat, slong * perm, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong r = mat->r;
+ slong i;
+
+ for (i = 0; i < r / 2; i++)
+ status |= gr_lil_mat_swap_rows(mat, perm, i, r - i - 1, ctx);
+
+ return status;
+}
diff --git a/src/gr_sparse_mat/is_neg_one.c b/src/gr_sparse_mat/is_neg_one.c
new file mode 100644
index 0000000000..e0d0f9a1ea
--- /dev/null
+++ b/src/gr_sparse_mat/is_neg_one.c
@@ -0,0 +1,59 @@
+/*
+ Copyright (C) 2022 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_sparse_mat.h"
+
+truth_t
+gr_csr_mat_is_neg_one(const gr_csr_mat_t mat, gr_ctx_t ctx)
+{
+ slong row, idx;
+ gr_method_unary_predicate is_zero = GR_UNARY_PREDICATE(ctx, IS_ZERO);
+ gr_method_unary_predicate is_neg_one = GR_UNARY_PREDICATE(ctx, IS_NEG_ONE);
+ truth_t this_eq;
+ truth_t ret = T_TRUE;
+ slong sz = ctx->sizeof_elem;
+
+ for (row = 0; row < mat->r; ++row)
+ {
+ for (idx = mat->rows[row]; idx < mat->rows[row+1]; idx++)
+ {
+ this_eq = (mat->cols[idx] == row ? is_neg_one : is_zero)(GR_ENTRY(mat->nzs, idx, sz), ctx);
+            if (this_eq == T_FALSE)
+                return T_FALSE;
+            else if (this_eq == T_UNKNOWN)
+                ret = T_UNKNOWN;
+ }
+ }
+ return ret;
+}
+
+truth_t gr_lil_mat_is_neg_one(const gr_lil_mat_t mat, gr_ctx_t ctx)
+{
+ slong row, idx;
+ gr_method_unary_predicate is_zero = GR_UNARY_PREDICATE(ctx, IS_ZERO);
+ gr_method_unary_predicate is_neg_one = GR_UNARY_PREDICATE(ctx, IS_NEG_ONE);
+ truth_t this_eq;
+ truth_t ret = T_TRUE;
+ slong sz = ctx->sizeof_elem;
+
+ for (row = 0; row < mat->r; ++row)
+ {
+ for (idx = 0; idx < mat->rows[row].nnz; idx++)
+ {
+ this_eq = (mat->rows[row].inds[idx] == row ? is_neg_one : is_zero)(GR_ENTRY(mat->rows[row].nzs, idx, sz), ctx);
+            if (this_eq == T_FALSE)
+                return T_FALSE;
+            else if (this_eq == T_UNKNOWN)
+                ret = T_UNKNOWN;
+ }
+ }
+ return ret;
+}
diff --git a/src/gr_sparse_mat/is_one.c b/src/gr_sparse_mat/is_one.c
new file mode 100644
index 0000000000..d94ad3a78f
--- /dev/null
+++ b/src/gr_sparse_mat/is_one.c
@@ -0,0 +1,58 @@
+/*
+ Copyright (C) 2022 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_sparse_mat.h"
+
+truth_t gr_csr_mat_is_one(const gr_csr_mat_t mat, gr_ctx_t ctx)
+{
+ slong row, idx;
+ gr_method_unary_predicate is_zero = GR_UNARY_PREDICATE(ctx, IS_ZERO);
+ gr_method_unary_predicate is_one = GR_UNARY_PREDICATE(ctx, IS_ONE);
+ truth_t this_eq;
+ truth_t ret = T_TRUE;
+ slong sz = ctx->sizeof_elem;
+
+ for (row = 0; row < mat->r; ++row)
+ {
+ for (idx = mat->rows[row]; idx < mat->rows[row+1]; idx++)
+ {
+ this_eq = (mat->cols[idx] == row ? is_one : is_zero)(GR_ENTRY(mat->nzs, idx, sz), ctx);
+            if (this_eq == T_FALSE)
+                return T_FALSE;
+            else if (this_eq == T_UNKNOWN)
+                ret = T_UNKNOWN;
+ }
+ }
+ return ret;
+}
+
+truth_t gr_lil_mat_is_one(const gr_lil_mat_t mat, gr_ctx_t ctx)
+{
+ slong row, idx;
+ gr_method_unary_predicate is_zero = GR_UNARY_PREDICATE(ctx, IS_ZERO);
+ gr_method_unary_predicate is_one = GR_UNARY_PREDICATE(ctx, IS_ONE);
+ truth_t this_eq;
+ truth_t ret = T_TRUE;
+ slong sz = ctx->sizeof_elem;
+
+ for (row = 0; row < mat->r; ++row)
+ {
+ for (idx = 0; idx < mat->rows[row].nnz; idx++)
+ {
+ this_eq = (mat->rows[row].inds[idx] == row ? is_one : is_zero)(GR_ENTRY(mat->rows[row].nzs, idx, sz), ctx);
+            if (this_eq == T_FALSE)
+                return T_FALSE;
+            else if (this_eq == T_UNKNOWN)
+                ret = T_UNKNOWN;
+ }
+ }
+ return ret;
+}
diff --git a/src/gr_sparse_mat/lu.c b/src/gr_sparse_mat/lu.c
new file mode 100644
index 0000000000..52f2609614
--- /dev/null
+++ b/src/gr_sparse_mat/lu.c
@@ -0,0 +1,214 @@
+/*
+ Copyright (C) 2010 Fredrik Johansson
+ Copyright (C) 2020 Kartik Venkatram
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+    by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdlib.h>
+#include "gr_sparse_mat.h"
+
+static void heap_up(slong *heap, slong *heap_idx, slong *scores, slong pos)
+{
+ const slong c = heap[pos];
+ slong nc, npos;
+ for (; pos > 0; pos = npos)
+ {
+ npos = (pos-1)/2;
+ nc = heap[npos];
+ if (scores[c] >= scores[nc]) break;
+
+ heap[pos] = nc;
+ heap_idx[nc] = pos;
+ }
+ heap[pos] = c;
+ heap_idx[c] = pos;
+}
+
+static void heap_down(slong *heap, slong *heap_idx, slong *scores, slong size, slong pos)
+{
+ const slong c = heap[pos];
+ slong nc, npos;
+ for (; pos < (size-1)/2; pos = npos)
+ {
+ npos = 2*pos+1;
+ if (npos+1 < size && scores[heap[npos]] > scores[heap[npos+1]]) ++npos;
+ nc = heap[npos];
+ if (scores[c] <= scores[nc]) break;
+
+ heap[pos] = nc;
+ heap_idx[nc] = pos;
+ }
+ heap[pos] = c;
+ heap_idx[c] = pos;
+}
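+
+/* The two helpers above maintain a binary min-heap of columns keyed by
+   their current nonzero count; the elimination below pops the sparsest
+   remaining column as the pivot, a Markowitz-style heuristic intended to
+   limit fill-in. */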
+
+/* static void print_heap(slong *heap, slong *scores, slong size)
+{
+ slong level, i;
+ for (level = 1; level <= size; level<<=1)
+ {
+ for (i = level; i <= size && i < 2*level; ++i)
+ {
+ flint_printf("%wd:%wd,%wd\t", i-1, heap[i-1], scores[heap[i-1]]);
+ }
+ flint_printf("\n");
+ }
+}
+ */
+int gr_lil_mat_lu(
+ slong *res_rank, slong *P, slong *Q,
+ gr_lil_mat_t L, gr_lil_mat_t U,
+ const gr_lil_mat_t A, gr_ctx_t ctx
+)
+{
+ slong i, j, r, c, rank, pr, pc, remr, remc;
+ slong *heap, *heap_idx, *scores, heap_size;
+ gr_lil_mat_t Lt;
+ gr_sparse_vec_struct *pcol, *prow, *row, *col;
+ gr_ptr cinv, cc;
+ int status = GR_SUCCESS;
+
+ if (A->r == 0 || A->c == 0 || A->nnz == 0)
+ {
+ *res_rank = 0;
+ gr_lil_mat_zero(L, ctx);
+ gr_lil_mat_zero(U, ctx);
+        for (i = 0; i < A->r; ++i) P[i] = i;
+        if (Q != NULL)
+            for (j = 0; j < A->c; ++j) Q[j] = j;
+ return GR_SUCCESS;
+ }
+ GR_TMP_INIT2(cinv, cc, ctx);
+ gr_lil_mat_init(Lt, L->c, L->r, ctx);
+ status |= gr_lil_mat_transpose(Lt, A, ctx);
+ status |= gr_lil_mat_set(U, A, ctx);
+
+ /* Set up permutations */
+ remr = A->r, remc = A->c;
+ for (r = 0; r < A->r; ++r)
+ {
+ if (!U->rows[r].nnz) P[r] = --remr;
+ else P[r] = -1;
+ }
+ if (Q != NULL)
+ {
+ for (j = 0; j < A->c; ++j)
+ {
+ if (!Lt->rows[j].nnz) Q[j] = --remc;
+ else Q[j] = -1;
+ }
+ /* Make heap of nonzero columns by size */
+ heap_size = A->c;
+ heap = flint_malloc(A->c*sizeof(*heap));
+ scores = flint_malloc(A->c*sizeof(*scores));
+ heap_idx = flint_malloc(A->c*sizeof(*heap_idx));
+ for (j = 0; j < A->c; ++j)
+ {
+ scores[j] = Lt->rows[j].nnz; /* TODO: randomized tiebreaker */
+ heap[j] = j;
+ heap_up(heap, heap_idx, scores, j);
+ }
+ }
+
+ /* Run elimination */
+ rank = 0;
+ for (heap_size = A->c; heap_size > 0; )
+ {
+
+ /* Get pivot column */
+ if (Q != NULL)
+ {
+ /* Get lowest weight column (top of heap) */
+ pc = heap[0];
+ pcol = &Lt->rows[pc];
+ heap[0] = heap[--heap_size];
+ heap_down(heap, heap_idx, scores, heap_size, 0);
+ if (pcol->nnz == 0) continue; /* Empty columns already dealt with */
+ Q[pc] = rank; /* Move pivot column to front */
+ }
+ else
+ {
+ pc = A->c - heap_size--;
+ pcol = &Lt->rows[pc];
+ if (pcol->nnz == 0) continue; /* Nothing to do */
+ }
+
+ /* Get lowest weight incident row */
+ pr = pcol->inds[0], prow = &U->rows[pr];
+ for (j = 1; j < pcol->nnz; ++j)
+ {
+ r = pcol->inds[j], row = &U->rows[r];
+ if (row->nnz < prow->nnz) pr = r, prow = row;
+ }
+ P[pr] = rank; /* Move pivot row to front */
+
+ /* Invert pivot */
+ status |= gr_inv(cinv, gr_sparse_vec_find_entry(prow, pc, ctx), ctx);
+
+ /* Lower triangular matrix will have ones on the diagonal */
+ status |= gr_sparse_vec_mul_scalar(pcol, pcol, cinv, ctx);
+
+ /* Gaussian eliminate lower rows in U incident on pivot column */
+ for (j = 0; j < pcol->nnz; ++j)
+ {
+ r = pcol->inds[j], row = &U->rows[r];
+ if (P[r] >= 0) continue; /* Skip previous pivot rows */
+
+ status |= gr_mul(cc, cinv, gr_sparse_vec_find_entry(row, pc, ctx), ctx);
+ status |= gr_neg(cc, cc, ctx);
+ status |= gr_sparse_vec_addmul_scalar(row, prow, cc, ctx);
+ if (row->nnz == 0) P[r] = --remr;
+ }
+ /* Gaussian eliminate lower cols in L incident on pivot row */
+ for (j = 0; j < prow->nnz; ++j)
+ {
+ c = prow->inds[j], col = &Lt->rows[c];
+ if ((Q == NULL && (c >= A->c || c<=pc)) || (Q != NULL && Q[c] >= 0))
+ continue; /* Skip previous pivot columns */
+ status |= gr_neg(cc, gr_sparse_vec_find_entry(col, pr, ctx), ctx);
+ status |= gr_sparse_vec_addmul_scalar(col, pcol, cc, ctx);
+
+ if (Q != NULL)
+ {
+ if (col->nnz == 0) Q[c] = --remc;
+ scores[c] = col->nnz;
+ heap_up(heap, heap_idx, scores, heap_idx[c]);
+ heap_down(heap, heap_idx, scores, heap_size, heap_idx[c]);
+ }
+ }
+ rank += 1;
+ }
+ /* Fix nnz */
+ Lt->nnz = 0;
+ for (j = 0; j < A->c; ++j)
+ {
+ Lt->nnz += Lt->rows[j].nnz;
+ }
+ U->nnz = 0;
+ for (j = 0; j < A->r; ++j)
+ {
+ U->nnz += U->rows[j].nnz;
+ }
+
+ /* Transpose L^t */
+ status |= gr_lil_mat_transpose(L, Lt, ctx);
+
+ /* Reorder rows and cols in L and U */
+ status |= gr_lil_mat_permute_rows(L, P, ctx);
+ status |= gr_lil_mat_permute_rows(U, P, ctx);
+ if (Q != NULL)
+ {
+ status |= gr_lil_mat_permute_cols(L, Q, ctx);
+ status |= gr_lil_mat_permute_cols(U, Q, ctx);
+ }
+ *res_rank = rank;
+
+ gr_lil_mat_clear(Lt, ctx);
+    GR_TMP_CLEAR2(cinv, cc, ctx);
+ return status;
+}
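+
+/* Illustrative usage (sketch): a fill-reducing factorization of an
+   initialized gr_lil_mat_t A over a field context, with P and Q receiving
+   the row and column permutations:
+
+       slong rank;
+       slong *P = flint_malloc(A->r * sizeof(slong));
+       slong *Q = flint_malloc(A->c * sizeof(slong));
+       gr_lil_mat_t L, U;
+       gr_lil_mat_init(L, A->r, A->c, ctx);
+       gr_lil_mat_init(U, A->r, A->c, ctx);
+       status |= gr_lil_mat_lu(&rank, P, Q, L, U, A, ctx);
+*/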
diff --git a/src/gr_sparse_mat/mul.c b/src/gr_sparse_mat/mul.c
new file mode 100644
index 0000000000..f05aadd89d
--- /dev/null
+++ b/src/gr_sparse_mat/mul.c
@@ -0,0 +1,237 @@
+/*
+ Copyright (C) 2010 Fredrik Johansson
+ Copyright (C) 2020 Kartik Venkatram
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+    by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdlib.h>
+#include <string.h>
+#include "flint.h"
+#include "gr_sparse_mat.h"
+
+int gr_csr_mat_mul_vec(gr_ptr v, const gr_csr_mat_t A, gr_srcptr u, gr_ctx_t ctx)
+{
+ slong ar, i, sz;
+ int status;
+ gr_sparse_vec_t row;
+
+ sz = ctx->sizeof_elem;
+ ar = gr_sparse_mat_nrows(A, ctx);
+
+ if (gr_csr_mat_is_zero(A, ctx) == T_TRUE)
+ {
+ return _gr_vec_zero(v, ar, ctx);
+ }
+
+ if (u == v)
+ {
+ gr_ptr w;
+ GR_TMP_INIT_VEC(w, ar, ctx);
+ status = gr_csr_mat_mul_vec(w, A, u, ctx);
+ _gr_vec_swap(v, w, ar, ctx);
+ GR_TMP_CLEAR_VEC(w, ar, ctx);
+ return status;
+ }
+
+ status = _gr_vec_zero(v, ar, ctx);
+ for (i = 0; i < ar; ++i) {
+ _gr_csr_mat_borrow_row(row, A, i, ctx);
+ status |= gr_sparse_vec_dot_vec(GR_ENTRY(v, i, sz), GR_ENTRY(v, i, sz), 0, row, u, ctx);
+ }
+
+ return status;
+}
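+
+/* Illustrative usage (sketch): dense vectors u and v of lengths A->c and
+   A->r respectively; aliasing u == v is handled by the temporary above:
+
+       gr_ptr u, v;
+       GR_TMP_INIT_VEC(u, A->c, ctx);
+       GR_TMP_INIT_VEC(v, A->r, ctx);
+       // ... fill u ...
+       status |= gr_csr_mat_mul_vec(v, A, u, ctx);   // v = A*u
+       GR_TMP_CLEAR_VEC(u, A->c, ctx);
+       GR_TMP_CLEAR_VEC(v, A->r, ctx);
+*/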
+
+int gr_lil_mat_mul_vec(gr_ptr v, const gr_lil_mat_t A, gr_srcptr u, gr_ctx_t ctx)
+{
+ slong ar, i, sz;
+ int status;
+
+ sz = ctx->sizeof_elem;
+ ar = gr_sparse_mat_nrows(A, ctx);
+
+ if (gr_lil_mat_is_zero(A, ctx) == T_TRUE)
+ {
+ return _gr_vec_zero(v, ar, ctx);
+ }
+
+ status = GR_SUCCESS;
+
+ if (u == v)
+ {
+ gr_ptr w;
+ GR_TMP_INIT_VEC(w, ar, ctx);
+ status |= gr_lil_mat_mul_vec(w, A, u, ctx);
+ _gr_vec_swap(v, w, ar, ctx);
+ GR_TMP_CLEAR_VEC(w, ar, ctx);
+ return status;
+ }
+
+ status |= _gr_vec_zero(v, ar, ctx);
+    for (i = 0; i < ar; ++i)
+    {
+        status |= gr_sparse_vec_dot_vec(GR_ENTRY(v, i, sz), GR_ENTRY(v, i, sz), 0, &A->rows[i], u, ctx);
+    }
+
+ return status;
+}
+
+#define GR_MAT_OOP_FN(status, M, ctx, fn, ...) \
+{ \
+ gr_mat_t T; \
+ gr_mat_init(T, M->r, M->c, ctx); \
+ status |= fn(T, __VA_ARGS__, ctx); \
+ status |= gr_mat_swap_entrywise(T, M, ctx); \
+ gr_mat_clear(T, ctx); \
+}
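+
+/* The macro above makes the multiplication routines safe for aliased
+   outputs: the product is computed into a temporary matrix T which is then
+   swapped entrywise into M, so a caller may pass the same gr_mat_t as both
+   operand and result. */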
+
+int gr_csr_mat_mul_mat_transpose(gr_mat_t Ct, const gr_csr_mat_t A, const gr_mat_t Bt, gr_ctx_t ctx)
+{
+ slong ar, btr, i;
+ int status;
+
+ ar = gr_sparse_mat_nrows(A, ctx);
+ btr = gr_mat_nrows(Bt, ctx);
+
+ if (gr_sparse_mat_ncols(A, ctx) != gr_mat_ncols(Bt, ctx) || ar != gr_mat_ncols(Ct, ctx) || btr != gr_mat_nrows(Ct, ctx))
+ return GR_DOMAIN;
+
+ if (gr_csr_mat_is_zero(A, ctx) == T_TRUE)
+ return gr_mat_zero(Ct, ctx);
+
+ if (Bt == Ct)
+ {
+ status = GR_SUCCESS;
+ GR_MAT_OOP_FN(status, Ct, ctx, gr_csr_mat_mul_mat_transpose, A, Bt);
+ return status;
+ }
+
+ status = gr_mat_zero(Ct, ctx);
+ for (i = 0; i < btr; i++)
+ {
+ status |= gr_csr_mat_mul_vec(Ct->rows[i], A, Bt->rows[i], ctx);
+ }
+ return status;
+}
+
+
+int gr_lil_mat_mul_mat_transpose(gr_mat_t Ct, const gr_lil_mat_t A, const gr_mat_t Bt, gr_ctx_t ctx)
+{
+ slong ar, btr, i;
+ int status;
+
+ ar = gr_sparse_mat_nrows(A, ctx);
+ btr = gr_mat_nrows(Bt, ctx);
+
+ if (gr_sparse_mat_ncols(A, ctx) != gr_mat_ncols(Bt, ctx) || ar != gr_mat_ncols(Ct, ctx) || btr != gr_mat_nrows(Ct, ctx))
+ return GR_DOMAIN;
+
+ if (gr_lil_mat_is_zero(A, ctx) == T_TRUE)
+ return gr_mat_zero(Ct, ctx);
+
+ if (Bt == Ct)
+ {
+ status = GR_SUCCESS;
+ GR_MAT_OOP_FN(status, Ct, ctx, gr_lil_mat_mul_mat_transpose, A, Bt);
+ return status;
+ }
+
+ status = gr_mat_zero(Ct, ctx);
+ for (i = 0; i < btr; i++) {
+ status |= gr_lil_mat_mul_vec(Ct->rows[i], A, Bt->rows[i], ctx);
+ }
+ return status;
+}
+
+int gr_csr_mat_mul_mat(gr_mat_t C, const gr_csr_mat_t A, const gr_mat_t B, gr_ctx_t ctx)
+{
+ slong ar, bc, i, j, sz;
+ int status;
+
+ ar = gr_sparse_mat_nrows(A, ctx);
+ bc = gr_mat_ncols(B, ctx);
+
+ if (gr_sparse_mat_ncols(A, ctx) != gr_mat_nrows(B, ctx) || ar != gr_mat_nrows(C, ctx) || bc != gr_mat_ncols(C, ctx))
+ return GR_DOMAIN;
+
+ if (gr_csr_mat_is_zero(A, ctx) == T_TRUE)
+ return gr_mat_zero(C, ctx);
+
+ status = GR_SUCCESS;
+
+ if (B == C)
+ {
+ GR_MAT_OOP_FN(status, C, ctx, gr_csr_mat_mul_mat, A, B);
+ return status;
+ }
+
+ gr_mat_t Bt;
+ gr_mat_t Ct;
+ gr_method_void_unary_op set_shallow = GR_VOID_UNARY_OP(ctx, SET_SHALLOW);
+
+ sz = ctx->sizeof_elem;
+
+ TMP_INIT;
+ TMP_START;
+ _GR_MAT_INIT_SHALLOW_TRANSPOSE(Bt, B, sz);
+ _GR_MAT_INIT_SHALLOW_TRANSPOSE(Ct, C, sz);
+
+ status |= gr_csr_mat_mul_mat_transpose(Ct, A, Bt, ctx);
+
+ _GR_MAT_SHALLOW_TRANSPOSE(C, Ct, sz);
+
+ flint_free(Bt->rows);
+ flint_free(Ct->rows);
+ TMP_END;
+
+ return status;
+}
+
+int gr_lil_mat_mul_mat(gr_mat_t C, const gr_lil_mat_t A, const gr_mat_t B, gr_ctx_t ctx)
+{
+ slong ar, bc, i, j, sz;
+ int status;
+
+ ar = gr_sparse_mat_nrows(A, ctx);
+ bc = gr_mat_ncols(B, ctx);
+
+ if (gr_sparse_mat_ncols(A, ctx) != gr_mat_nrows(B, ctx) || ar != gr_mat_nrows(C, ctx) || bc != gr_mat_ncols(C, ctx))
+ return GR_DOMAIN;
+
+ if (gr_lil_mat_is_zero(A, ctx) == T_TRUE)
+ return gr_mat_zero(C, ctx);
+
+ status = GR_SUCCESS;
+
+ if (B == C)
+ {
+ GR_MAT_OOP_FN(status, C, ctx, gr_lil_mat_mul_mat, A, B);
+ return status;
+ }
+
+ gr_mat_t Bt;
+ gr_mat_t Ct;
+ gr_method_void_unary_op set_shallow = GR_VOID_UNARY_OP(ctx, SET_SHALLOW);
+
+ sz = ctx->sizeof_elem;
+
+ TMP_INIT;
+ TMP_START;
+ _GR_MAT_INIT_SHALLOW_TRANSPOSE(Bt, B, sz);
+ _GR_MAT_INIT_SHALLOW_TRANSPOSE(Ct, C, sz);
+
+ status |= gr_lil_mat_mul_mat_transpose(Ct, A, Bt, ctx);
+
+ _GR_MAT_SHALLOW_TRANSPOSE(C, Ct, sz);
+
+ flint_free(Bt->rows);
+ flint_free(Ct->rows);
+ TMP_END;
+ return status;
+}
diff --git a/src/gr_sparse_mat/neg.c b/src/gr_sparse_mat/neg.c
new file mode 100644
index 0000000000..b4193aebc6
--- /dev/null
+++ b/src/gr_sparse_mat/neg.c
@@ -0,0 +1,57 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+ Copyright (C) 2023 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <string.h>
+#include "gr_sparse_mat.h"
+
+int gr_csr_mat_neg(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ {
+ return GR_DOMAIN;
+ }
+ status |= gr_csr_mat_set(dst, src, ctx);
+ status |= _gr_vec_neg(dst->nzs, dst->nzs, dst->nnz, ctx);
+ return status;
+}
+
+int gr_lil_mat_neg(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx)
+{
+    slong row;
+ int status = GR_SUCCESS;
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ {
+ return GR_DOMAIN;
+ }
+    dst->nnz = src->nnz;
+    for (row = 0; row < dst->r; row++)
+    {
+        status |= gr_sparse_vec_neg(&dst->rows[row], &src->rows[row], ctx);
+    }
+ return status;
+}
+
+int gr_coo_mat_neg(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ {
+ return GR_DOMAIN;
+ }
+ status |= gr_coo_mat_set(dst, src, ctx);
+ status |= _gr_vec_neg(dst->nzs, dst->nzs, dst->nnz, ctx);
+ return status;
+}
diff --git a/src/gr_sparse_mat/nullspace.c b/src/gr_sparse_mat/nullspace.c
new file mode 100644
index 0000000000..70daaaa37b
--- /dev/null
+++ b/src/gr_sparse_mat/nullspace.c
@@ -0,0 +1,109 @@
+/*
+ Copyright (C) 2010 Fredrik Johansson
+ Copyright (C) 2024 Kartik Venkatram
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+    by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "stdlib.h"
+#include "gr_sparse_mat.h"
+
+int gr_lil_mat_nullspace(gr_mat_t X, const gr_lil_mat_t M, flint_rand_t state, slong max_iters, const char *algorithm, slong block_size, gr_ctx_t ctx)
+{
+    /* Repeatedly generate random vectors in the kernel of M, stopping after
+       max_iters consecutive samples that fail to enlarge their span */
+ slong i, j, c, iter, nullity, *x_pivots, sz;
+ gr_ptr x, coeff, *xs;
+ int status = GR_SUCCESS, cur_status;
+
+ sz = ctx->sizeof_elem;
+ c = gr_sparse_mat_ncols(M, ctx);
+
+ GR_TMP_INIT(coeff, ctx);
+ GR_TMP_INIT_VEC(x, c, ctx);
+ nullity = 0;
+ xs = NULL;
+ x_pivots = NULL;
+ for (iter = 0; iter < max_iters; )
+ {
+ if (strcmp(algorithm, "lanczos") == 0)
+ cur_status = gr_lil_mat_nullvector_lanczos(x, M, state, ctx);
+ else if (strcmp(algorithm, "wiedemann") == 0)
+ cur_status = gr_lil_mat_nullvector_wiedemann(x, M, state, ctx);
+ else if (strcmp(algorithm, "block lanczos") == 0)
+ cur_status = gr_lil_mat_nullvector_block_lanczos(x, M, block_size, state, ctx);
+ else if (strcmp(algorithm, "block wiedemann") == 0)
+ cur_status = gr_lil_mat_nullvector_block_wiedemann(x, M, block_size, state, ctx);
+ else
+ {
+ status = GR_DOMAIN;
+ break;
+ }
+ if (cur_status == GR_TEST_FAIL)
+ {
+ ++iter;
+ continue;
+ }
+
+ /* Reduce by existing kernel vectors */
+ for (j = nullity-1; j >= 0; --j)
+ {
+ status |= gr_neg(coeff, GR_ENTRY(x, x_pivots[j], sz), ctx);
+ status |= _gr_vec_addmul_scalar(x, xs[j], c, coeff, ctx);
+ }
+
+ /* Normalize last nonzero entry to 1 */
+ for (i = c-1; i >= 0; --i)
+ if (gr_is_zero(GR_ENTRY(x, i, sz), ctx) != T_TRUE)
+ break;
+ if (i == -1) {
+ /* x in span of xs, nullspace probably complete */
+ ++iter;
+ continue;
+ }
+ status |= gr_inv(coeff, GR_ENTRY(x, i, sz), ctx);
+ status |= _gr_vec_mul_scalar(x, x, c, coeff, ctx);
+
+ /* Reduce previous vectors by this one */
+ for (j = 0; j < nullity; ++j)
+ {
+ status |= gr_neg(coeff, GR_ENTRY(xs[j], i, sz), ctx);
+ status |= _gr_vec_addmul_scalar(xs[j], x, c, coeff, ctx);
+ }
+
+ /* Insert into list of vectors in nullspace (ordered by pivot) */
+        xs = flint_realloc(xs, (nullity+1)*sizeof(gr_ptr));
+        x_pivots = flint_realloc(x_pivots, (nullity+1)*sizeof(slong));
+        for (j = 0; j < nullity; ++j)
+            if (i > x_pivots[j])
+                break;
+        memmove(xs + j + 1, xs + j, (nullity - j) * sizeof(gr_ptr));
+        memmove(x_pivots + j + 1, x_pivots + j, (nullity - j) * sizeof(slong));
+ x_pivots[j] = i;
+ xs[j] = x; // Steal allocation
+ x = NULL;
+ GR_TMP_INIT_VEC(x, c, ctx);
+
+ // Advance nullity and restart iteration
+ nullity += 1;
+ iter = 0;
+ }
+ // Set X to have xs as column vectors
+ // TODO: can use shallow to reuse memory?
+ gr_mat_init(X, c, nullity, ctx);
+ for (i = 0; i < nullity; ++i)
+ for (j = 0; j < c; ++j)
+ status |= gr_set(gr_mat_entry_ptr(X, j, i, ctx), GR_ENTRY(xs[i], j, sz), ctx);
+
+    flint_free(x_pivots);
+    GR_TMP_CLEAR(coeff, ctx);
+    GR_TMP_CLEAR_VEC(x, c, ctx);
+    for (i = 0; i < nullity; ++i)
+        GR_TMP_CLEAR_VEC(xs[i], c, ctx);
+    flint_free(xs);
+    return status;
+}
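+
+/* Illustrative usage (sketch): sampling kernel vectors with the Wiedemann
+   variant; X is initialized by the call itself, state is a flint_rand_t,
+   and max_iters bounds the number of consecutive unproductive samples:
+
+       gr_mat_t X;
+       status |= gr_lil_mat_nullspace(X, M, state, 5, "wiedemann", 0, ctx);
+       // columns of X now span the discovered nullspace
+*/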
+
diff --git a/src/gr_sparse_mat/permute_cols.c b/src/gr_sparse_mat/permute_cols.c
new file mode 100644
index 0000000000..7b921618dd
--- /dev/null
+++ b/src/gr_sparse_mat/permute_cols.c
@@ -0,0 +1,56 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+ Copyright (C) 2023 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <string.h>
+#include "gr_sparse_mat.h"
+
+int gr_csr_mat_permute_cols(gr_csr_mat_t mat, slong * perm, gr_ctx_t ctx)
+{
+ slong row;
+ gr_sparse_vec_t tmp;
+ int status = GR_SUCCESS;
+
+ for (row = 0; row < mat->r; ++row)
+ {
+ _gr_csr_mat_borrow_row(tmp, mat, row, ctx);
+ status |= gr_sparse_vec_permute_inds(tmp, tmp, perm, ctx);
+ }
+ return status;
+}
+
+int gr_lil_mat_permute_cols(gr_lil_mat_t mat, slong * perm, gr_ctx_t ctx)
+{
+ slong row;
+ int status = GR_SUCCESS;
+
+ for (row = 0; row < mat->r; ++row)
+ {
+ status |= gr_sparse_vec_permute_inds(&mat->rows[row], &mat->rows[row], perm, ctx);
+ }
+ return status;
+}
+
+int gr_coo_mat_permute_cols(gr_coo_mat_t mat, slong * perm, gr_ctx_t ctx)
+{
+ slong nz_idx;
+ int status = GR_SUCCESS;
+
+ for (nz_idx = 0; nz_idx < mat->nnz; ++nz_idx)
+ {
+ mat->cols[nz_idx] = perm[mat->cols[nz_idx]];
+ }
+    if (mat->is_canonical == T_TRUE)
+    {
+        // Permuting columns breaks the sort order, so re-canonicalize
+        mat->is_canonical = T_FALSE;
+        status |= gr_coo_mat_canonicalize(mat, ctx);
+    }
+ return status;
+}
diff --git a/src/gr_sparse_mat/permute_rows.c b/src/gr_sparse_mat/permute_rows.c
new file mode 100644
index 0000000000..473c01b3f0
--- /dev/null
+++ b/src/gr_sparse_mat/permute_rows.c
@@ -0,0 +1,45 @@
+/*
+ Copyright (C) 2022 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_sparse_mat.h"
+
+int gr_lil_mat_permute_rows(gr_lil_mat_t mat, const slong * perm, gr_ctx_t ctx)
+{
+ slong i, j;
+ slong *iperm;
+
+ /* todo: bounds checking */
+ if (perm == NULL)
+ {
+ return GR_DOMAIN;
+ }
+
+ // Get inverse permutation, i.e., iperm[i] = row to go in ith place
+ iperm = flint_malloc(mat->r * sizeof(slong));
+ for (i = 0; i < mat->r; ++i)
+ {
+ iperm[perm[i]] = i;
+ }
+
+ // Will do at most rows - 1 swaps
+ for (i = 0; i < mat->r - 1; ++i)
+ {
+ // Get element to permute with current location
+ for (j = iperm[i]; j < i; j = iperm[j]);
+ if (i != j)
+ {
+ gr_sparse_vec_swap(&mat->rows[i], &mat->rows[j], ctx);
+ }
+ }
+
+ flint_free(iperm);
+ return GR_SUCCESS;
+}
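+
+/* For example, perm = {2, 0, 1} sends row 0 to position 2, row 1 to
+   position 0 and row 2 to position 1; following each cycle of the inverse
+   permutation applies this with at most r - 1 swaps of the row headers and
+   no copying of entries. */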
diff --git a/src/gr_sparse_mat/randtest.c b/src/gr_sparse_mat/randtest.c
new file mode 100644
index 0000000000..cbe5e54d03
--- /dev/null
+++ b/src/gr_sparse_mat/randtest.c
@@ -0,0 +1,126 @@
+#include <stdlib.h>
+#include "gr_sparse_mat.h"
+
+static int
+slong_cmp(const void * a, const void * b)
+{
+    slong ax = *((slong *) a);
+    slong bx = *((slong *) b);
+    // Compare explicitly: returning ax - bx can overflow and truncates to int
+    return (ax < bx) ? -1 : ((ax > bx) ? 1 : 0);
+}
+
+
+int gr_coo_mat_randtest(gr_coo_mat_t mat, slong nnz, int replacement, truth_t is_canonical, flint_rand_t state, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong i, j, sz, len;
+ sz = ctx->sizeof_elem;
+ len = flint_mul_sizes(mat->r, mat->c);
+
+ if (nnz < 0 || nnz > len)
+ return GR_DOMAIN;
+
+ if (nnz == 0)
+ {
+ gr_coo_mat_zero(mat, ctx);
+ return GR_SUCCESS;
+ }
+
+ // Make space
+ gr_coo_mat_fit_nnz(mat, nnz, ctx);
+
+ if (replacement)
+ {
+ // Randomly sample nnz locations with replacement
+ for (i = 0; i < nnz; ++i)
+ mat->cols[i] = n_randint(state, len);
+
+ // If canonical, sort and compress
+ if (is_canonical == T_TRUE)
+ {
+ qsort(mat->cols, nnz, sizeof(slong), slong_cmp);
+
+ j = 0;
+ for (i = 0; i < nnz; ++i)
+ if (i == 0 || mat->cols[i] != mat->cols[i-1])
+ mat->cols[j++] = mat->cols[i];
+ mat->nnz = j;
+ }
+ else
+ mat->nnz = nnz;
+ }
+ else
+ {
+ // Randomly sample nnz columns without replacement
+ for (i = 0; i < len; ++i)
+ {
+ j = i < nnz ? i : n_randint(state, i+1);
+ if (j < nnz) mat->cols[j] = i;
+ }
+ if (is_canonical == T_TRUE && nnz < len)
+ qsort(mat->cols, nnz, sizeof(slong), slong_cmp);
+ mat->nnz = nnz;
+ }
+
+ for (i = 0; i < nnz; ++i)
+ {
+ mat->rows[i] = mat->cols[i] / mat->c;
+ mat->cols[i] %= mat->c;
+ }
+ mat->is_canonical = is_canonical;
+
+ for (i = 0; i < mat->nnz; ++i)
+ {
+ if (is_canonical == T_TRUE)
+ status |= gr_randtest_not_zero(GR_ENTRY(mat->nzs, i, sz), state, ctx);
+ else
+ status |= gr_randtest(GR_ENTRY(mat->nzs, i, sz), state, ctx);
+ }
+ return status;
+}
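+
+/* Illustrative usage (sketch): filling an initialized 10 x 10 gr_coo_mat_t
+   with 20 nonzero positions sampled without replacement, stored in
+   canonical order:
+
+       status |= gr_coo_mat_randtest(mat, 20, 0, T_TRUE, state, ctx);
+*/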
+
+int
+gr_coo_mat_randtest_prob(gr_coo_mat_t mat, double prob, flint_rand_t state, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong i, j, sz, len;
+ sz = ctx->sizeof_elem;
+ len = flint_mul_sizes(mat->r, mat->c);
+
+ if (prob < 0 || prob > 1)
+ return GR_DOMAIN;
+
+ // Handle corner cases
+ if (prob == 0)
+ {
+ gr_coo_mat_zero(mat, ctx);
+ return GR_SUCCESS;
+ }
+ if (prob == 1)
+ {
+ status |= gr_coo_mat_randtest(mat, len, 0, T_TRUE, state, ctx);
+ return status;
+ }
+
+ // Allocate space for expected number of nonzeroes, and expand as needed
+ gr_coo_mat_fit_nnz(mat, prob * len, ctx);
+
+ // TODO: for low probability, should be able to do this faster
+ mat->nnz = 0;
+ for (i = 0; i < mat->r; ++i)
+ {
+ for (j = 0; j < mat->c; ++j)
+ {
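+            // n_randint(state, 0) returns a uniformly random full limb, and
+            // 2 * prob * WORD_MAX is roughly prob * UWORD_MAX, so each
+            // position is kept with probability about prob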
+ if (n_randint(state, 0) < 2 * prob * WORD_MAX)
+ {
+ if (mat->nnz == mat->alloc)
+ gr_coo_mat_fit_nnz(mat, mat->alloc * 2, ctx);
+ status |= gr_randtest_not_zero(GR_ENTRY(mat->nzs, mat->nnz, sz), state, ctx);
+ mat->rows[mat->nnz] = i;
+ mat->cols[mat->nnz++] = j;
+ }
+ }
+ }
+ mat->is_canonical = T_TRUE;
+ return status;
+}
diff --git a/src/gr_sparse_mat/rref.c b/src/gr_sparse_mat/rref.c
new file mode 100644
index 0000000000..c2c8a1f6e7
--- /dev/null
+++ b/src/gr_sparse_mat/rref.c
@@ -0,0 +1,119 @@
+/*
+ Copyright (C) 2010 Fredrik Johansson
+ Copyright (C) 2020 Kartik Venkatram
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+    by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdlib.h>
+#include <string.h>
+#include "flint.h"
+#include "gr_sparse_vec.h"
+#include "gr_sparse_mat.h"
+
+int gr_lil_mat_rref(slong *res_rank, gr_lil_mat_t R, gr_lil_mat_t A, gr_ctx_t ctx)
+{
+ slong *P;
+ slong j, r, c, pr, pc, rank, remr, sz;
+ slong nr = A->r;
+ slong nc = A->c;
+ gr_lil_mat_t Rt;
+ gr_sparse_vec_struct *pcol, *prow, *row, *col;
+ gr_ptr cinv, cc;
+ int status = GR_SUCCESS;
+
+ if (gr_mat_is_compatible(A, R, ctx) != T_TRUE)
+ {
+ return GR_DOMAIN;
+ }
+ if (nr == 0 || nc == 0)
+ {
+ *res_rank = 0;
+ return GR_SUCCESS;
+ }
+    if (A->nnz == 0)
+    {
+        *res_rank = 0;
+        gr_lil_mat_zero(R, ctx);
+        return GR_SUCCESS;
+    }
+ // Get transpose and copy of A
+ gr_lil_mat_init(Rt, nc, nr, ctx);
+ status |= gr_lil_mat_transpose(Rt, A, ctx);
+ status |= gr_lil_mat_set(R, A, ctx);
+
+ GR_TMP_INIT2(cinv, cc, ctx);
+
+ sz = ctx->sizeof_elem;
+
+ /* Set up permutations */
+ P = flint_malloc(nr*sizeof(*P));
+ remr = nr;
+ for (r = 0; r < nr; ++r)
+ {
+ if (!A->rows[r].nnz || A->rows[r].inds[0] >= nc) P[r] = --remr;
+ else P[r] = -1;
+ }
+
+ /* Run elimination */
+ rank = 0;
+ for (pc = 0; pc < nc; ++pc)
+ {
+ pcol = &Rt->rows[pc];
+
+ /* Get lowest weight incident row not used as previous pivot */
+ pr = -1, prow = NULL;
+ for (j = 0; j < pcol->nnz; ++j)
+ {
+ r = pcol->inds[j], row = &R->rows[r];
+ if (P[r] >= 0) continue;
+ if (pr==-1 || (row->nnz < prow->nnz)) pr = r, prow = row;
+ }
+ if (pr == -1) continue;
+ P[pr] = rank;
+
+ status |= gr_inv(cinv, gr_sparse_vec_find_entry(prow, pc, ctx), ctx);
+ status |= gr_sparse_vec_mul_scalar(prow, prow, cinv, ctx);
+
+ /* Gaussian eliminate rows */
+ for (j = 0; j < pcol->nnz; ++j)
+ {
+ r = pcol->inds[j], row = &R->rows[r];
+ if (r==pr) {status |= gr_zero(GR_ENTRY(pcol->nzs, j, sz), ctx); continue;}
+
+ status |= gr_neg(cc, gr_sparse_vec_find_entry(row, pc, ctx), ctx);
+ status |= gr_sparse_vec_addmul_scalar(row, prow, cc, ctx);
+ if (row->nnz == 0 || row->inds[0] >= nc) P[r] = --remr;
+ }
+ /* Gaussian eliminate cols */
+ status |= gr_sparse_vec_mul_scalar(pcol, pcol, cinv, ctx);
+ for (j = 0; j < prow->nnz; ++j)
+ {
+ c = prow->inds[j], col = &Rt->rows[c];
+ if (c >= nc || c==pc) continue;
+ status |= gr_neg(cc, gr_sparse_vec_find_entry(col, pr, ctx), ctx);
+ status |= gr_sparse_vec_addmul_scalar(col, pcol, cc, ctx);
+ }
+ rank += 1;
+ }
+ gr_lil_mat_clear(Rt, ctx);
+
+ /* Fix nnz */
+ R->nnz = 0;
+ for (j = 0; j < nr; ++j)
+ {
+ R->nnz += R->rows[j].nnz;
+ }
+
+ /* Reorder rows */
+ status |= gr_lil_mat_permute_rows(R, P, ctx);
+ flint_free(P);
+ GR_TMP_CLEAR2(cinv, cc, ctx);
+ *res_rank = rank;
+ return status;
+}
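+
+/* Illustrative usage (sketch): reducing an initialized gr_lil_mat_t A over
+   a field context; R receives the reduced matrix with pivot rows first and
+   the rank comes back through the out-parameter:
+
+       gr_lil_mat_t R;
+       slong rank;
+       gr_lil_mat_init(R, A->r, A->c, ctx);
+       status |= gr_lil_mat_rref(&rank, R, A, ctx);
+*/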
diff --git a/src/gr_sparse_mat/scalar.c b/src/gr_sparse_mat/scalar.c
new file mode 100644
index 0000000000..c14493a4cf
--- /dev/null
+++ b/src/gr_sparse_mat/scalar.c
@@ -0,0 +1,164 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+ Copyright (C) 2023 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <string.h>
+#include "gr_sparse_mat.h"
+
+#define GR_CSR_MAT_DENSE_VEC_OP(dense_vec_op, dst, src, c, ctx) \
+    if (dst->r != src->r || dst->c != src->c) \
+    { \
+        return GR_DOMAIN; \
+    } \
+    if (dst != src) \
+    { \
+        gr_csr_mat_fit_nnz(dst, src->nnz, ctx); \
+        dst->nnz = src->nnz; \
+        memcpy(dst->rows, src->rows, (src->r+1)*sizeof(ulong)); \
+        memcpy(dst->cols, src->cols, src->nnz*sizeof(ulong)); \
+    } \
+    return dense_vec_op(dst->nzs, src->nzs, src->nnz, c, ctx);
+
+int gr_csr_mat_mul_scalar(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar, dst, src, c, ctx) }
+int gr_csr_mat_mul_scalar_si(gr_csr_mat_t dst, const gr_csr_mat_t src, slong c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_si, dst, src, c, ctx) }
+int gr_csr_mat_mul_scalar_ui(gr_csr_mat_t dst, const gr_csr_mat_t src, ulong c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_ui, dst, src, c, ctx) }
+int gr_csr_mat_mul_scalar_fmpz(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_fmpz, dst, src, c, ctx) }
+int gr_csr_mat_mul_scalar_fmpq(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_fmpq, dst, src, c, ctx) }
+int gr_csr_mat_mul_scalar_2exp_si(gr_csr_mat_t dst, const gr_csr_mat_t src, slong c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_2exp_si, dst, src, c, ctx) }
+int gr_csr_mat_div_scalar(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_div_scalar, dst, src, c, ctx) }
+int gr_csr_mat_div_scalar_si(gr_csr_mat_t dst, const gr_csr_mat_t src, slong c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_div_scalar_si, dst, src, c, ctx) }
+int gr_csr_mat_div_scalar_ui(gr_csr_mat_t dst, const gr_csr_mat_t src, ulong c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_div_scalar_ui, dst, src, c, ctx) }
+int gr_csr_mat_div_scalar_fmpz(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_div_scalar_fmpz, dst, src, c, ctx) }
+int gr_csr_mat_div_scalar_fmpq(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_div_scalar_fmpq, dst, src, c, ctx) }
+int gr_csr_mat_divexact_scalar(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar, dst, src, c, ctx) }
+int gr_csr_mat_divexact_scalar_si(gr_csr_mat_t dst, const gr_csr_mat_t src, slong c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar_si, dst, src, c, ctx) }
+int gr_csr_mat_divexact_scalar_ui(gr_csr_mat_t dst, const gr_csr_mat_t src, ulong c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar_ui, dst, src, c, ctx) }
+int gr_csr_mat_divexact_scalar_fmpz(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar_fmpz, dst, src, c, ctx) }
+int gr_csr_mat_divexact_scalar_fmpq(gr_csr_mat_t dst, const gr_csr_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+{ GR_CSR_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar_fmpq, dst, src, c, ctx) }
+
+#define GR_LIL_MAT_DENSE_VEC_OP(dense_vec_op, dst, src, c, ctx) { \
+ int status = GR_SUCCESS; \
+ slong row; \
+ if(dst->r != src->r || dst->c != src->c) \
+ { \
+ return GR_DOMAIN; \
+ } \
+ dst->nnz = src->nnz; \
+ for (row = 0; row < src->r; ++row) \
+ { \
+ if(dst != src) \
+ { \
+ gr_sparse_vec_fit_nnz(&dst->rows[row], src->rows[row].nnz, ctx); \
+ dst->rows[row].nnz = src->rows[row].nnz; \
+ memcpy(dst->rows[row].inds, src->rows[row].inds, src->rows[row].nnz*sizeof(ulong)); \
+ } \
+ status |= dense_vec_op(dst->rows[row].nzs, src->rows[row].nzs, src->rows[row].nnz, c, ctx); \
+ } \
+ return status; \
+}
+
+int gr_lil_mat_mul_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar, dst, src, c, ctx) }
+int gr_lil_mat_mul_scalar_si(gr_lil_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_si, dst, src, c, ctx) }
+int gr_lil_mat_mul_scalar_ui(gr_lil_mat_t dst, const gr_lil_mat_t src, ulong c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_ui, dst, src, c, ctx) }
+int gr_lil_mat_mul_scalar_fmpz(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_fmpz, dst, src, c, ctx) }
+int gr_lil_mat_mul_scalar_fmpq(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_fmpq, dst, src, c, ctx) }
+int gr_lil_mat_mul_scalar_2exp_si(gr_lil_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_2exp_si, dst, src, c, ctx) }
+int gr_lil_mat_div_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_div_scalar, dst, src, c, ctx) }
+int gr_lil_mat_div_scalar_si(gr_lil_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_div_scalar_si, dst, src, c, ctx) }
+int gr_lil_mat_div_scalar_ui(gr_lil_mat_t dst, const gr_lil_mat_t src, ulong c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_div_scalar_ui, dst, src, c, ctx) }
+int gr_lil_mat_div_scalar_fmpz(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_div_scalar_fmpz, dst, src, c, ctx) }
+int gr_lil_mat_div_scalar_fmpq(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_div_scalar_fmpq, dst, src, c, ctx) }
+int gr_lil_mat_divexact_scalar(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar, dst, src, c, ctx) }
+int gr_lil_mat_divexact_scalar_si(gr_lil_mat_t dst, const gr_lil_mat_t src, slong c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar_si, dst, src, c, ctx) }
+int gr_lil_mat_divexact_scalar_ui(gr_lil_mat_t dst, const gr_lil_mat_t src, ulong c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar_ui, dst, src, c, ctx) }
+int gr_lil_mat_divexact_scalar_fmpz(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar_fmpz, dst, src, c, ctx) }
+int gr_lil_mat_divexact_scalar_fmpq(gr_lil_mat_t dst, const gr_lil_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+{ GR_LIL_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar_fmpq, dst, src, c, ctx) }
+
+
+#define GR_COO_MAT_DENSE_VEC_OP(dense_vec_op, dst, src, c, ctx) \
+ if(dst->r != src->r || dst->c != src->c) \
+ { \
+ return GR_DOMAIN; \
+ } \
+ if(dst != src) \
+ { \
+ gr_coo_mat_fit_nnz(dst, src->nnz, ctx); \
+ dst->nnz = src->nnz; \
+ memcpy(dst->rows, src->rows, src->nnz*sizeof(ulong)); \
+ memcpy(dst->cols, src->cols, src->nnz*sizeof(ulong)); \
+ dst->is_canonical = src->is_canonical; \
+ } \
+ return dense_vec_op(dst->nzs, src->nzs, src->nnz, c, ctx); \
+
+int gr_coo_mat_mul_scalar(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar, dst, src, c, ctx) }
+int gr_coo_mat_mul_scalar_si(gr_coo_mat_t dst, const gr_coo_mat_t src, slong c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_si, dst, src, c, ctx) }
+int gr_coo_mat_mul_scalar_ui(gr_coo_mat_t dst, const gr_coo_mat_t src, ulong c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_ui, dst, src, c, ctx) }
+int gr_coo_mat_mul_scalar_fmpz(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_fmpz, dst, src, c, ctx) }
+int gr_coo_mat_mul_scalar_fmpq(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_fmpq, dst, src, c, ctx) }
+int gr_coo_mat_mul_scalar_2exp_si(gr_coo_mat_t dst, const gr_coo_mat_t src, slong c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_mul_scalar_2exp_si, dst, src, c, ctx) }
+int gr_coo_mat_div_scalar(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_div_scalar, dst, src, c, ctx) }
+int gr_coo_mat_div_scalar_si(gr_coo_mat_t dst, const gr_coo_mat_t src, slong c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_div_scalar_si, dst, src, c, ctx) }
+int gr_coo_mat_div_scalar_ui(gr_coo_mat_t dst, const gr_coo_mat_t src, ulong c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_div_scalar_ui, dst, src, c, ctx) }
+int gr_coo_mat_div_scalar_fmpz(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_div_scalar_fmpz, dst, src, c, ctx) }
+int gr_coo_mat_div_scalar_fmpq(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_div_scalar_fmpq, dst, src, c, ctx) }
+int gr_coo_mat_divexact_scalar(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar, dst, src, c, ctx) }
+int gr_coo_mat_divexact_scalar_si(gr_coo_mat_t dst, const gr_coo_mat_t src, slong c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar_si, dst, src, c, ctx) }
+int gr_coo_mat_divexact_scalar_ui(gr_coo_mat_t dst, const gr_coo_mat_t src, ulong c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar_ui, dst, src, c, ctx) }
+int gr_coo_mat_divexact_scalar_fmpz(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpz_t c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar_fmpz, dst, src, c, ctx) }
+int gr_coo_mat_divexact_scalar_fmpq(gr_coo_mat_t dst, const gr_coo_mat_t src, const fmpq_t c, gr_ctx_t ctx)
+{ GR_COO_MAT_DENSE_VEC_OP(_gr_vec_divexact_scalar_fmpq, dst, src, c, ctx) }
diff --git a/src/gr_sparse_mat/set.c b/src/gr_sparse_mat/set.c
new file mode 100644
index 0000000000..54678bf522
--- /dev/null
+++ b/src/gr_sparse_mat/set.c
@@ -0,0 +1,460 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "stdlib.h"
+#include "gr_sparse_mat.h"
+
+typedef struct
+{
+ slong i;
+ slong row;
+ slong col;
+ void * entry;
+}
+sparse_mat_index_t;
+
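+/* Compare (row, col) pairs lexicographically, to sort COO entries into row-major order */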
+static int sparse_mat_index_cmp(const void* a, const void* b)
+{
+ slong arow = ((sparse_mat_index_t*)(a))->row;
+ slong brow = ((sparse_mat_index_t*)(b))->row;
+ slong acol = ((sparse_mat_index_t*)(a))->col;
+ slong bcol = ((sparse_mat_index_t*)(b))->col;
+ return (arow < brow ? -1 : (arow > brow ? 1 : (acol < bcol ? -1 : (acol > bcol ? 1 : 0))));
+}
+
+static sparse_mat_index_t * _sort_coords(ulong * rows, ulong * cols, const void * entries, slong sz, slong num)
+{
+ slong i;
+ sparse_mat_index_t * si;
+
+ si = flint_malloc(num * sizeof(sparse_mat_index_t));
+ for (i = 0; i < num; i++)
+ {
+ si[i].i = i;
+ si[i].row = rows[i];
+ si[i].col = cols[i];
+ si[i].entry = (void *) (((char *) (entries)) + ((i) * (sz)));
+ }
+
+ qsort(si, num, sizeof(sparse_mat_index_t), sparse_mat_index_cmp);
+ return si;
+}
+
+int gr_csr_mat_set(gr_csr_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx) {
+ int status = GR_SUCCESS;
+
+ if (dst != src)
+ {
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ {
+ return GR_DOMAIN;
+ }
+ dst->nnz = src->nnz;
+ gr_csr_mat_fit_nnz(dst, src->nnz, ctx);
+ memcpy(dst->rows, src->rows, (src->r+1) * sizeof(ulong));
+ memcpy(dst->cols, src->cols, src->nnz * sizeof(ulong));
+ status = _gr_vec_set(dst->nzs, src->nzs, src->nnz, ctx);
+ }
+ return status;
+}
+
+int gr_lil_mat_set(gr_lil_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx) {
+ ulong row;
+ int status = GR_SUCCESS;
+
+ if (dst != src)
+ {
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ {
+ return GR_DOMAIN;
+ }
+ dst->nnz = src->nnz;
+
+ for (row = 0; row < src->r; ++row) {
+ status |= gr_sparse_vec_set(&dst->rows[row], &src->rows[row], ctx);
+ }
+ }
+ return status;
+}
+
+int gr_coo_mat_set(gr_coo_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx) {
+ int status = GR_SUCCESS;
+
+ if (dst != src)
+ {
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ {
+ return GR_DOMAIN;
+ }
+ dst->nnz = src->nnz;
+ gr_coo_mat_fit_nnz(dst, src->nnz, ctx);
+ memcpy(dst->rows, src->rows, src->nnz * sizeof(ulong));
+ memcpy(dst->cols, src->cols, src->nnz * sizeof(ulong));
+ status = _gr_vec_set(dst->nzs, src->nzs, src->nnz, ctx);
+ dst->is_canonical = src->is_canonical;
+ }
+ return status;
+}
+
+int gr_csr_mat_set_lil_mat(gr_csr_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx)
+{
+ ulong row;
+ int status = GR_SUCCESS;
+ gr_sparse_vec_t dst_row;
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ return GR_DOMAIN;
+
+ gr_csr_mat_fit_nnz(dst, src->nnz, ctx);
+
+ dst->rows[0] = 0;
+ for(row = 0; row < src->r; row++) {
+ dst->rows[row+1] = dst->rows[row] + src->rows[row].nnz;
+ _gr_csr_mat_borrow_row(dst_row, dst, row, ctx);
+ status |= gr_sparse_vec_set(dst_row, &src->rows[row], ctx);
+ }
+ dst->nnz = src->nnz;
+
+ return status;
+}
+
+int gr_lil_mat_set_csr_mat(gr_lil_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx)
+{
+ ulong row;
+ int status = GR_SUCCESS;
+ gr_sparse_vec_t mat_row;
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ return GR_DOMAIN;
+
+ for(row = 0; row < src->r; row++) {
+ _gr_csr_mat_borrow_row(mat_row, src, row, ctx);
+ status |= gr_sparse_vec_set(&dst->rows[row], mat_row, ctx);
+ }
+ dst->nnz = src->nnz;
+
+ return status;
+}
+
+int gr_csr_mat_set_coo_mat(gr_csr_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx)
+{
+ // Sort entries by row and column
+ slong i, sz, nnz, row;
+ int status = GR_SUCCESS;
+ sparse_mat_index_t * si;
+ gr_ptr entry;
+ gr_method_unary_predicate is_zero = GR_UNARY_PREDICATE(ctx, IS_ZERO);
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ return GR_DOMAIN;
+
+ if (src->nnz == 0)
+ {
+ gr_csr_mat_zero(dst, ctx);
+ return GR_SUCCESS;
+ }
+
+ sz = ctx->sizeof_elem;
+
+ gr_csr_mat_fit_nnz(dst, src->nnz, ctx);
+ if (src->is_canonical == T_TRUE)
+ {
+ // Just copy over the data and set the row offsets
+ dst->rows[0] = 0;
+ row = 0;
+ for (i = 0; i < src->nnz; ++i)
+ while (row != src->rows[i])
+ dst->rows[++row] = i;
+ while (row < dst->r)
+ dst->rows[++row] = i;
+
+ memcpy(dst->cols, src->cols, src->nnz * sizeof(ulong));
+ status |= _gr_vec_set(dst->nzs, src->nzs, src->nnz, ctx);
+ dst->nnz = src->nnz;
+ }
+ else
+ {
+ // Sort coordinates
+ si = _sort_coords(src->rows, src->cols, src->nzs, sz, src->nnz);
+
+ // Accumulate nonzeroes into matrix
+ row = 0;
+ nnz = 0;
+ dst->rows[0] = 0;
+ entry = NULL;
+ for (i = 0; i < src->nnz; ++i)
+ {
+
+ // Check if we can just accumulate
+ if(i > 0 && si[i-1].row == si[i].row && si[i-1].col == si[i].col)
+ status |= gr_add(entry, entry, si[i].entry, ctx);
+ else
+ {
+ // Advance row offsets as needed
+
+ // If previous entry does not exist or is not zero, advance to the next one
+ if (entry == NULL || is_zero(entry, ctx) != T_TRUE)
+ {
+ entry = GR_ENTRY(dst->nzs, nnz, sz);
+ ++nnz;
+ }
+ while (row != si[i].row)
+ dst->rows[++row] = nnz - 1;
+ dst->cols[nnz - 1] = si[i].col;
+ status |= gr_set(entry, si[i].entry, ctx);
+ }
+ }
+ if (entry != NULL && is_zero(entry, ctx) == T_TRUE)
+ --nnz;
+
+ // Set remaining row offsets and overall number of nonzeroes
+ while (row < dst->r)
+ dst->rows[++row] = nnz;
+ dst->nnz = nnz;
+ flint_free(si);
+ }
+ return status;
+}
+
+int gr_lil_mat_set_coo_mat(gr_lil_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx)
+{
+ slong i, sz, nnz, row, row_start_idx, row_end_idx;
+ int status = GR_SUCCESS;
+ sparse_mat_index_t * si;
+ gr_method_unary_predicate is_zero = GR_UNARY_PREDICATE(ctx, IS_ZERO);
+ gr_ptr entry;
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ return GR_DOMAIN;
+
+ if (src->nnz == 0)
+ {
+ gr_lil_mat_zero(dst, ctx);
+ return GR_SUCCESS;
+ }
+
+ sz = ctx->sizeof_elem;
+ if (src->is_canonical == T_TRUE)
+ {
+ row_start_idx = row_end_idx = 0;
+ for (row = 0; row < dst->r; ++row)
+ {
+ while (row_end_idx < src->nnz && src->rows[row_end_idx] == row) ++row_end_idx;
+ status |= gr_sparse_vec_from_entries(
+ &dst->rows[row], src->cols + row_start_idx, GR_ENTRY(src->nzs, row_start_idx, sz), row_end_idx - row_start_idx, T_TRUE, ctx
+ );
+ row_start_idx = row_end_idx;
+ }
+ dst->nnz = src->nnz;
+ }
+ else
+ {
+ // Sort entries by row and column
+ si = _sort_coords(src->rows, src->cols, src->nzs, sz, src->nnz);
+
+ // Construct rows one by one
+ row_start_idx = 0;
+ dst->nnz = 0;
+ for (row = 0; row < dst->r; ++row)
+ {
+ // Get range of indicies in current row and estimate of nonzeroes
+ for (row_end_idx = row_start_idx; row_end_idx < src->nnz; ++row_end_idx)
+ if (si[row_end_idx].row != row)
+ break;
+ gr_sparse_vec_fit_nnz(&dst->rows[row], row_end_idx - row_start_idx, ctx);
+
+ // Add nonzeroes to row
+ entry = NULL;
+ nnz = 0;
+ for (i = row_start_idx; i < row_end_idx; ++i)
+ {
+ // Skip zero entries
+ if (is_zero(si[i].entry, ctx) == T_TRUE)
+ continue;
+
+ // Check if we can just accumulate
+ if(i > row_start_idx && si[i-1].col == si[i].col)
+ status |= gr_add(entry, entry, si[i].entry, ctx);
+ else
+ {
+ // Check if need to get new entry to store to
+ if (entry == NULL || is_zero(entry, ctx) != T_TRUE)
+ {
+ entry = GR_ENTRY(dst->rows[row].nzs, nnz, sz);
+ ++nnz;
+ }
+ // Store to entry and update current column
+ dst->rows[row].inds[nnz - 1] = si[i].col;
+ status |= gr_set(entry, si[i].entry, ctx);
+ }
+ }
+ if (entry != NULL && is_zero(entry, ctx) == T_TRUE)
+ --nnz;
+ dst->rows[row].nnz = nnz;
+ dst->nnz += nnz;
+ row_start_idx = row_end_idx;
+ }
+ flint_free(si);
+ }
+
+ return status;
+}
+
+int gr_coo_mat_set_lil_mat(gr_coo_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx)
+{
+ ulong row, i;
+ slong sz;
+ int status = GR_SUCCESS;
+ gr_sparse_vec_struct *vec;
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ return GR_DOMAIN;
+
+ gr_coo_mat_fit_nnz(dst, src->nnz, ctx);
+
+ sz = ctx->sizeof_elem;
+
+ dst->nnz = 0;
+ for(row = 0; row < src->r; row++) {
+ vec = &src->rows[row];
+ for (i = 0; i < vec->nnz; ++i)
+ dst->rows[dst->nnz + i] = row;
+
+ memcpy(dst->cols + dst->nnz, vec->inds, vec->nnz * sizeof(ulong));
+ status |= _gr_vec_set(GR_ENTRY(dst->nzs, dst->nnz, sz), vec->nzs, vec->nnz, ctx);
+ dst->nnz += vec->nnz;
+ }
+ dst->nnz = src->nnz;
+ dst->is_canonical = T_TRUE;
+ return status;
+}
+
+int gr_coo_mat_set_csr_mat(gr_coo_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx)
+{
+ ulong row;
+ int status = GR_SUCCESS;
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ return GR_DOMAIN;
+
+ gr_coo_mat_fit_nnz(dst, src->nnz, ctx);
+
+ dst->nnz = 0;
+ for(row = 0; row < dst->r; ++row)
+ while (dst->nnz < src->rows[row + 1])
+ dst->rows[dst->nnz++] = row;
+ memcpy(dst->cols, src->cols, src->nnz * sizeof(ulong));
+ status |= _gr_vec_set(dst->nzs, src->nzs, src->nnz, ctx);
+ dst->is_canonical = T_TRUE;
+
+ return status;
+}
+
+int gr_csr_mat_set_mat(gr_csr_mat_t dst, const gr_mat_t src, gr_ctx_t ctx)
+{
+ ulong row, col;
+ slong nnz, sz;
+ int status = GR_SUCCESS;
+ gr_method_unary_predicate is_zero = GR_UNARY_PREDICATE(ctx, IS_ZERO);
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ return GR_DOMAIN;
+
+ // Get number of nonzero entries
+ sz = ctx->sizeof_elem;
+ nnz = 0;
+ for (row = 0; row < src->r; ++row)
+ for (col = 0; col < src->c; ++col)
+ if (is_zero(GR_MAT_ENTRY(src, row, col, sz), ctx) != T_TRUE)
+ nnz++;
+ gr_csr_mat_fit_nnz(dst, nnz, ctx);
+
+ // Construct sparse matrix from nonzeroes
+ dst->rows[0] = 0;
+ nnz = 0;
+ for(row = 0; row < src->r; row++)
+ {
+ for (col = 0; col < src->c; ++col)
+ {
+ if (is_zero(GR_MAT_ENTRY(src, row, col, sz), ctx) != T_TRUE)
+ {
+ dst->cols[nnz] = col;
+ status |= gr_set(GR_ENTRY(dst->nzs, nnz, sz), GR_MAT_ENTRY(src, row, col, sz), ctx);
+ ++nnz;
+ }
+ }
+ dst->rows[row + 1] = nnz;
+ }
+ dst->nnz = nnz;
+
+ return status;
+}
+
+int gr_lil_mat_set_mat(gr_lil_mat_t dst, const gr_mat_t src, gr_ctx_t ctx)
+{
+ ulong row;
+ slong sz;
+ int status = GR_SUCCESS;
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ return GR_DOMAIN;
+
+ sz = ctx->sizeof_elem;
+ dst->nnz = 0;
+ for(row = 0; row < src->r; row++) {
+ status |= gr_sparse_vec_set_vec(&dst->rows[row], GR_MAT_ENTRY(src, row, 0, sz), src->c, ctx);
+ dst->nnz += dst->rows[row].nnz;
+ }
+
+ return status;
+}
+
+int gr_coo_mat_set_mat(gr_coo_mat_t dst, const gr_mat_t src, gr_ctx_t ctx)
+{
+ ulong row, col;
+ slong sz, nnz;
+ int status = GR_SUCCESS;
+ gr_method_unary_predicate is_zero = GR_UNARY_PREDICATE(ctx, IS_ZERO);
+
+ if (gr_mat_is_compatible(dst, src, ctx) == T_FALSE)
+ return GR_DOMAIN;
+
+ sz = ctx->sizeof_elem;
+
+ // Get number of nonzero entries
+ nnz = 0;
+ for (row = 0; row < src->r; ++row)
+ for (col = 0; col < src->c; ++col)
+ if (is_zero(GR_MAT_ENTRY(src, row, col, sz), ctx) != T_TRUE)
+ nnz++;
+ gr_coo_mat_fit_nnz(dst, nnz, ctx);
+
+ // Construct sparse matrix from nonzeroes
+ nnz = 0;
+ for(row = 0; row < src->r; row++)
+ {
+ for (col = 0; col < src->c; ++col)
+ {
+ if (is_zero(GR_MAT_ENTRY(src, row, col, sz), ctx) != T_TRUE)
+ {
+ dst->rows[nnz] = row;
+ dst->cols[nnz] = col;
+ status |= gr_set(GR_ENTRY(dst->nzs, nnz, sz), GR_MAT_ENTRY(src, row, col, sz), ctx);
+ ++nnz;
+ }
+ }
+ }
+ dst->nnz = nnz;
+
+ return status;
+}
diff --git a/src/gr_sparse_mat/set_cols.c b/src/gr_sparse_mat/set_cols.c
new file mode 100644
index 0000000000..ace19d9853
--- /dev/null
+++ b/src/gr_sparse_mat/set_cols.c
@@ -0,0 +1,69 @@
+#include "gr_sparse_mat.h"
+
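+/* Change the column dimension of the matrix to c. If c is smaller than the */
+/* current dimension, entries in discarded columns are removed and the */
+/* remaining entries are compacted in place. */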
+void
+gr_csr_mat_set_cols(gr_csr_mat_t mat, slong c, gr_ctx_t ctx)
+{
+ slong idx, new_idx, row;
+ slong sz = ctx->sizeof_elem;
+
+ if (c < mat->c)
+ {
+ // Keep track of row to update offsets
+ row = 1;
+
+ // May have some columns to discard
+ for (idx = new_idx = 0; idx < mat->nnz; ++idx)
+ {
+ // Update any row offsets that point at this entry
+ while (row <= mat->r && mat->rows[row] == idx)
+ {
+ mat->rows[row++] = new_idx;
+ }
+ if (mat->cols[idx] < c)
+ {
+ // Keep this entry, moving it down in the array if needed
+ if (idx != new_idx)
+ {
+ mat->cols[new_idx] = mat->cols[idx];
+ gr_swap(GR_ENTRY(mat->nzs, new_idx, sz), GR_ENTRY(mat->nzs, idx, sz), ctx);
+ }
+ ++new_idx;
+ }
+ }
+ // Fix the offsets of any remaining rows (including the terminal offset)
+ while (row <= mat->r)
+ {
+ mat->rows[row++] = new_idx;
+ }
+ mat->nnz = new_idx;
+ }
+ mat->c = c;
+}
+
+void
+gr_lil_mat_set_cols(gr_lil_mat_t mat, slong c, gr_ctx_t ctx)
+{
+ slong row;
+
+ mat->c = c;
+ for (row = 0; row < mat->r; ++row)
+ {
+ gr_sparse_vec_set_length(&mat->rows[row], c, ctx);
+ }
+}
+
+void
+gr_coo_mat_set_cols(gr_coo_mat_t mat, slong c, gr_ctx_t ctx)
+{
+ slong idx, new_idx;
+ slong sz = ctx->sizeof_elem;
+
+ if (c < mat->c)
+ {
+ // May have some columns to discard
+ for (idx = new_idx = 0; idx < mat->nnz; ++idx)
+ {
+ if (mat->cols[idx] < c)
+ {
+ // Keep this entry, moving it down in the array if needed
+ if (idx != new_idx)
+ {
+ mat->rows[new_idx] = mat->rows[idx];
+ mat->cols[new_idx] = mat->cols[idx];
+ gr_swap(GR_ENTRY(mat->nzs, new_idx, sz), GR_ENTRY(mat->nzs, idx, sz), ctx);
+ }
+ ++new_idx;
+ }
+ }
+ mat->nnz = new_idx;
+ }
+ mat->c = c;
+}
diff --git a/src/gr_sparse_mat/set_entry.c b/src/gr_sparse_mat/set_entry.c
new file mode 100644
index 0000000000..d4af82c25e
--- /dev/null
+++ b/src/gr_sparse_mat/set_entry.c
@@ -0,0 +1,78 @@
+#include <stdlib.h>
+#include <string.h>
+#include "gr_sparse_mat.h"
+
+static int ulong_cmp(const void* a, const void* b)
+{
+ ulong av = *((ulong*)(a));
+ ulong bv = *((ulong*)(b));
+ return (av < bv ? -1 : (av > bv ? 1 : 0));
+}
+
+int
+gr_csr_mat_set_entry(gr_csr_mat_t mat, slong row, slong col, gr_srcptr entry, gr_ctx_t ctx)
+{
+ slong i, j;
+ slong sz = ctx->sizeof_elem;
+ ulong* bs = NULL;
+
+ if (row < 0 || row >= mat->r || col < 0 || col >= mat->c)
+ return GR_DOMAIN;
+
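+ /* Binary search the sorted column indices of this row for col */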
+ bs = bsearch(&col, mat->cols + mat->rows[row], mat->rows[row+1] - mat->rows[row], sizeof(ulong), ulong_cmp);
+
+ if (bs != NULL)
+ {
+ i = bs - mat->cols;
+ if (gr_is_zero(entry, ctx) == T_TRUE)
+ {
+ // Shift everything above i down; only later rows' offsets change
+ for (j = row + 1; j <= mat->r; ++j)
+ --mat->rows[j];
+ memmove(mat->cols + i, mat->cols + i + 1, (mat->nnz - i - 1)*sizeof(ulong));
+ for (j = i; j < mat->nnz - 1; j++)
+ {
+ gr_swap(GR_ENTRY(mat->nzs, j, sz), GR_ENTRY(mat->nzs, j + 1, sz), ctx);
+ }
+ --mat->nnz;
+
+ return GR_SUCCESS;
+ }
+ }
+ else
+ {
+ if (gr_is_zero(entry, ctx) == T_TRUE)
+ {
+ // Already 0
+ return GR_SUCCESS;
+ }
+ // Make space for one more nonzero
+ gr_csr_mat_fit_nnz(mat, mat->nnz+1, ctx);
+
+ // Find location
+ for (i = mat->rows[row]; i < mat->rows[row + 1]; ++i)
+ if (col < mat->cols[i])
+ break;
+
+ // Shift everything above i up; only later rows' offsets change
+ for (j = row + 1; j <= mat->r; ++j)
+ ++mat->rows[j];
+ memmove(mat->cols + i + 1, mat->cols + i, (mat->nnz - i)*sizeof(ulong));
+ for (j = mat->nnz; j > i; j--)
+ {
+ gr_swap(GR_ENTRY(mat->nzs, j-1, sz), GR_ENTRY(mat->nzs, j, sz), ctx);
+ }
+
+ mat->cols[i] = col;
+ ++mat->nnz;
+ }
+ return gr_set(GR_ENTRY(mat->nzs, i, sz), entry, ctx);
+}
+
+int
+gr_lil_mat_set_entry(gr_lil_mat_t mat, slong row, slong col, gr_srcptr entry, gr_ctx_t ctx)
+{
+ if (row < 0 || row >= mat->r)
+ return GR_DOMAIN;
+
+ return gr_sparse_vec_set_entry(&mat->rows[row], col, entry, ctx);
+}
diff --git a/src/gr_sparse_mat/solve_block_lanczos.c b/src/gr_sparse_mat/solve_block_lanczos.c
new file mode 100644
index 0000000000..e0d3ed2871
--- /dev/null
+++ b/src/gr_sparse_mat/solve_block_lanczos.c
@@ -0,0 +1,289 @@
+/*
+ Copyright (C) 2010 Fredrik Johansson
+ Copyright (C) 2020 Kartik Venkatram
+
+ Algorithm taken from P. Montgomery, "A Block Lanczos Algorithm for
+ Finding Dependencies over GF(2)", Advances in Cryptology - EUROCRYPT '95
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "flint.h"
+#include "gr_sparse_mat.h"
+
+/* Run row Gaussian elimination on the first b columns of [T I], save */
+/* that, if no pivot is found for a given column c, we Gaussian eliminate */
+/* the column c + b and zero out the row c. In addition, we reorder */
+/* columns so that ones corresponding to zero entries in S go first. */
+/* See Figure 1 in the above reference for details. */
+static int compute_nWi_S(slong *rk, gr_mat_t nWi, int *S, const gr_mat_t Torig, gr_ctx_t ctx)
+{
+ const slong b = Torig->r;
+ slong pc, i, j;
+ gr_mat_t T;
+ gr_mat_struct *X;
+ slong *P;
+ gr_ptr cc;
+ int status = GR_SUCCESS;
+
+ GR_TMP_INIT(cc, ctx);
+ gr_mat_init(T, b, b, ctx);
+ status |= gr_mat_set(T, Torig, ctx);
+ status |= gr_mat_one(nWi, ctx);
+
+ /* Set permutation to have previously dependent vectors at front */
+ P = flint_malloc(b * sizeof(*P));
+ j = 0;
+ for (i = 0; i < b; ++i) if (!S[i]) P[j++] = i;
+ for (i = 0; i < b; ++i) if (S[i]) P[j++] = i;
+
+ *rk = 0;
+ for (j = 0; j < b; ++j)
+ {
+ pc = P[j]; /* Pivot col */
+
+ /* Find viable pivot row (from T if possible, then from W) */
+ for (X = T, i = j; i < b; ++i)
+ if (gr_is_zero(gr_mat_entry_srcptr(X, P[i], pc, ctx), ctx) == T_FALSE)
+ break;
+ if (i == b)
+ for (X = nWi, i = j; i < b; ++i)
+ if (gr_is_zero(gr_mat_entry_srcptr(X, P[i], pc, ctx), ctx) == T_FALSE)
+ break;
+ S[pc] = X == T; /* Viable column in V */
+ status |= gr_mat_swap_rows(T, NULL, pc, P[i], ctx);
+ status |= gr_mat_swap_rows(nWi, NULL, pc, P[i], ctx); /* Now pivot row = pivot col */
+
+ /* Make pivot one */
+ status |= gr_inv(cc, gr_mat_entry_ptr(X, pc, pc, ctx), ctx);
+ status |= _gr_vec_mul_scalar(T->rows[pc], T->rows[pc], b, cc, ctx);
+ status |= _gr_vec_mul_scalar(nWi->rows[pc], nWi->rows[pc], b, cc, ctx);
+
+
+ /* Kill all other entries in pivot column */
+ for (i = 0; i < b; ++i)
+ {
+ status |= gr_neg(cc, gr_mat_entry_ptr(X, P[i], pc, ctx), ctx);
+ if (i == j || gr_is_zero(cc, ctx) == T_TRUE) continue;
+ status |= _gr_vec_addmul_scalar(T->rows[P[i]], T->rows[pc], T->c, cc, ctx);
+ status |= _gr_vec_addmul_scalar(nWi->rows[P[i]], nWi->rows[pc], nWi->c, cc, ctx);
+ }
+ if (S[pc]) (*rk)++; /* Count viable columns */
+ else
+ {
+ /* Kill row of both matrices */
+ status |= _gr_vec_zero(T->rows[pc], b, ctx);
+ status |= _gr_vec_zero(nWi->rows[pc], b, ctx);
+ }
+ }
+
+ status |= gr_mat_neg(nWi, nWi, ctx);
+ gr_mat_clear(T, ctx);
+ flint_free(P);
+ GR_TMP_CLEAR(cc, ctx);
+
+ return status;
+}
+
+static int kill_columns(gr_mat_t M, int *good, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong i, j;
+ for (j = 0; j < M->c; ++j)
+ if (good[j] == 0)
+ for (i = 0; i < M->r; ++i)
+ status |= gr_zero(gr_mat_entry_ptr(M, i, j, ctx), ctx);
+ return status;
+}
+
+int gr_lil_mat_solve_block_lanczos(gr_ptr x, const gr_lil_mat_t M, gr_srcptr b, slong block_size, flint_rand_t state, gr_ctx_t ctx)
+{
+ slong r, c, i, j, prev_i, next_i, iter, cur_dim, total_dim = 0;
+ gr_lil_mat_t Mt; /* Transpose of M, we work with A = MtM */
+ gr_mat_struct V[3]; /* Keep track of current vector and two previous ones */
+ gr_mat_t MV; /* Application of M to V */
+ gr_mat_t AV; /* Application of Mt to MV */
+ int *SSt; /* S is the maximal projection s.t. (VS)^tAVS is invertible, so SSt kills the dropped columns */
+ gr_mat_struct nWi[3]; /* -S((VS)^tAVS)^-1S^t */
+ gr_mat_t VSSt; /* V with invalid vectors zeroed out */
+ gr_mat_t T; /* Used to store transposes for inner products */
+ gr_mat_t VtAV; /* Inner product <V, V>_A */
+ gr_mat_t AVtAVSSt_VtAV; /* Sum <AV, AV> SS^t + <V, V>_A, shared by two updates */
+ gr_mat_t DEF; /* Used to store coefficient matrices D, E, and F */
+ gr_mat_t I, tmp; /* I_{b x b}, tmp used as scratch */
+ gr_ptr Mtb, SStVtMtb, WiSStVtMtb, VSStWiSStVtMtb; /* Intermediate elements in x update */
+ int status = GR_UNABLE; /* Overwritten with GR_SUCCESS if the iteration converges */
+
+ // TODO: handle this
+ if (x == b)
+ return GR_DOMAIN;
+
+ r = gr_sparse_mat_nrows(M, ctx);
+ c = gr_sparse_mat_ncols(M, ctx);
+
+ if (_gr_vec_is_zero(b, r, ctx) == T_TRUE)
+ {
+ return _gr_vec_zero(x, c, ctx);
+ }
+
+ gr_lil_mat_init(Mt, c, r, ctx);
+ for (i = 0; i < 3; ++i)
+ gr_mat_init(&V[i], c, block_size, ctx);
+ gr_mat_init(MV, r, block_size, ctx); /* Intermediate product */
+ gr_mat_init(AV, c, block_size, ctx); /* Symmetric product */
+ SSt = flint_malloc(block_size*sizeof(*SSt));
+ for (i = 0; i < 3; ++i)
+ gr_mat_init(&nWi[i], block_size, block_size, ctx);
+ gr_mat_init(VSSt, c, block_size, ctx);
+ gr_mat_init(T, block_size, c, ctx); /* Transpose for computing matrix dot products */
+ gr_mat_init(VtAV, block_size, block_size, ctx);
+ gr_mat_init(AVtAVSSt_VtAV, block_size, block_size, ctx); /* (AV)^T(AV) + VtAV */
+ gr_mat_init(DEF, block_size, block_size, ctx); /* Shared by D, E, and F */
+ gr_mat_init(I, block_size, block_size, ctx);
+ gr_mat_init(tmp, block_size, block_size, ctx);
+ GR_TMP_INIT_VEC(Mtb, c, ctx);
+ GR_TMP_INIT_VEC(SStVtMtb, block_size, ctx);
+ GR_TMP_INIT_VEC(WiSStVtMtb, block_size, ctx);
+ GR_TMP_INIT_VEC(VSStWiSStVtMtb, c, ctx);
+
+ status |= _gr_vec_zero(x, c, ctx);
+ status |= gr_lil_mat_transpose(Mt, M, ctx);
+ for (i = 0; i < block_size; ++i) SSt[i] = 1;
+ status |= gr_mat_one(I, ctx);
+ status |= gr_lil_mat_mul_vec(Mtb, Mt, b, ctx);
+
+ /* Initialize V[0] randomly */
+ for (i = 0; i < c; ++i)
+ for (j = 0; j < block_size; ++j)
+ status |= gr_randtest(gr_mat_entry_ptr(&V[0], i, j, ctx), state, ctx);
+
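+ /* Each iteration computes A V_i, inverts the viable block of V_i^T A V_i, */
+ /* updates the solution x, and builds V_{i+1} via the recurrence below */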
+ for (iter = 0; ; ++iter)
+ {
+ i = iter % 3;
+ next_i = (iter + 1) % 3;
+ prev_i = (iter + 2) % 3;
+ if (iter >= 2)
+ {
+ /* Compute the F value for this round (minus the final term) */
+ status |= gr_mat_addmul(DEF, I, VtAV, &nWi[prev_i], ctx);
+ status |= gr_mat_mul(tmp, &nWi[next_i], DEF, ctx);
+ status |= gr_mat_mul(DEF, tmp, AVtAVSSt_VtAV, ctx);
+ }
+
+ /* Compute AV and V'AV */
+ status |= gr_lil_mat_mul_mat(MV, M, &V[i], ctx);
+ status |= gr_lil_mat_mul_mat(AV, Mt, MV, ctx);
+ status |= gr_mat_transpose(T, &V[i], ctx);
+ status |= gr_mat_mul(VtAV, T, AV, ctx);
+ if (gr_mat_is_zero(VtAV, ctx) == T_TRUE) {status = GR_SUCCESS; break;}
+
+ /* Compute W^{-1} and indices of bad vectors */
+ status |= compute_nWi_S(&cur_dim, &nWi[i], SSt, VtAV, ctx);
+ total_dim += cur_dim;
+ if (cur_dim == 0 || total_dim > c) break; /* Ran out of vectors */
+
+ /* Update x_i = x_{i-1} - (VSS^t) W^{-1} (VSS^t)^tb */
+ status |= gr_mat_set(VSSt, &V[i], ctx);
+ status |= kill_columns(VSSt, SSt, ctx);
+ status |= gr_mat_transpose(T, VSSt, ctx);
+ status |= gr_mat_mul_vec(SStVtMtb, T, Mtb, ctx);
+ status |= gr_mat_mul_vec(WiSStVtMtb, &nWi[i], SStVtMtb, ctx);
+ status |= gr_mat_mul_vec(VSStWiSStVtMtb, VSSt, WiSStVtMtb, ctx);
+ status |= _gr_vec_add(x, x, VSStWiSStVtMtb, c, ctx);
+
+ /**
+ * Per Equation (19), we compute the next vector
+ * V_{i+1} = AV_iS_iS_i^t + V_i D + V_{i-1} E + V_{i-2} F
+ * where
+ * D = I - W_i^-1((AV_i)^tAV_iS_iS_i^t + V_i^tAV_i)
+ * E = -W_{i-1}^-1V_i^tAV_iS_iS_i^t
+ * F = -W_{i-2}^-1(I - V_{i-1}^tAV_{i-1}W_{i-1}^-1)
+ * ((AV_{i-1})^tAV_{i-1}S_{i-1}S_{i-1}^t + V_{i-1}^tAV_{i-1})S_iS_i^t
+ **/
+ if (iter >= 2)
+ {
+ /* V_{i+1} = V_{i-2} F */
+ status |= kill_columns(DEF, SSt, ctx);
+ status |= gr_mat_mul(VSSt, &V[next_i], DEF, ctx);
+ status |= gr_mat_set(&V[next_i], VSSt, ctx);
+ }
+ if (iter >= 1)
+ {
+ /* V_{i+1} += V_{i-1} E */
+ status |= gr_mat_mul(DEF, &nWi[prev_i], VtAV, ctx);
+ status |= kill_columns(DEF, SSt, ctx);
+ status |= gr_mat_addmul(&V[next_i], &V[next_i], &V[prev_i], DEF, ctx);
+ }
+ /* V_{i+1} += V_i D */
+ status |= gr_mat_transpose(T, AV, ctx);
+ status |= gr_mat_mul(tmp, T, AV, ctx);
+ status |= kill_columns(tmp, SSt, ctx);
+ status |= gr_mat_add(AVtAVSSt_VtAV, tmp, VtAV, ctx);
+ status |= gr_mat_addmul(DEF, I, &nWi[i], AVtAVSSt_VtAV, ctx);
+ status |= gr_mat_addmul(&V[next_i], &V[next_i], &V[i], DEF, ctx);
+
+ /* V_{i+1} += AVSS^t */
+ status |= kill_columns(AV, SSt, ctx);
+ status |= gr_mat_add(&V[next_i], &V[next_i], AV, ctx);
+
+ if (gr_mat_is_zero(&V[next_i], ctx) == T_TRUE) {status = GR_SUCCESS; break;}
+ }
+ status |= _gr_vec_neg(x, x, c, ctx);
+ gr_lil_mat_clear(Mt, ctx);
+ for (i = 0; i < 3; ++i)
+ gr_mat_clear(&V[i], ctx);
+ gr_mat_clear(MV, ctx);
+ gr_mat_clear(AV, ctx);
+ flint_free(SSt);
+ for (i = 0; i < 3; ++i)
+ gr_mat_clear(&nWi[i], ctx);
+ gr_mat_clear(T, ctx);
+ gr_mat_clear(VtAV, ctx);
+ gr_mat_clear(VSSt, ctx);
+ gr_mat_clear(AVtAVSSt_VtAV, ctx);
+ gr_mat_clear(DEF, ctx);
+ gr_mat_clear(I, ctx);
+ gr_mat_clear(tmp, ctx);
+ GR_TMP_CLEAR_VEC(Mtb, c, ctx);
+ GR_TMP_CLEAR_VEC(SStVtMtb, block_size, ctx);
+ GR_TMP_CLEAR_VEC(WiSStVtMtb, block_size, ctx);
+ GR_TMP_CLEAR_VEC(VSStWiSStVtMtb, c, ctx);
+ return status;
+}
+
+int gr_lil_mat_nullvector_block_lanczos(gr_ptr x, const gr_lil_mat_t M, slong block_size, flint_rand_t state, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ gr_ptr x2, b;
+ GR_TMP_INIT_VEC(x2, M->c, ctx);
+ GR_TMP_INIT_VEC(b, M->r, ctx);
+
+ status |= _gr_vec_randtest(x, state, M->c, ctx);
+ status |= gr_lil_mat_mul_vec(b, M, x, ctx);
+ status |= gr_lil_mat_solve_block_lanczos(x2, M, b, block_size, state, ctx);
+
+ if (status == GR_SUCCESS)
+ {
+ status |= _gr_vec_sub(x, x, x2, M->c, ctx);
+ if (_gr_vec_is_zero(x, M->c, ctx) == T_TRUE)
+ status = GR_TEST_FAIL;
+ else
+ {
+ status |= gr_lil_mat_mul_vec(b, M, x, ctx);
+ if (_gr_vec_is_zero(b, M->r, ctx) == T_FALSE)
+ status = GR_DOMAIN;
+ }
+ }
+ GR_TMP_CLEAR_VEC(x2, M->c, ctx);
+ GR_TMP_CLEAR_VEC(b, M->r, ctx);
+ return status;
+}
diff --git a/src/gr_sparse_mat/solve_block_wiedemann.c b/src/gr_sparse_mat/solve_block_wiedemann.c
new file mode 100644
index 0000000000..16137fb670
--- /dev/null
+++ b/src/gr_sparse_mat/solve_block_wiedemann.c
@@ -0,0 +1,288 @@
+/*
+ Copyright (C) 2010 Fredrik Johansson
+ Copyright (C) 2020 Kartik Venkatram
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "flint.h"
+#include "gr_sparse_mat.h"
+
+/* Compute S_j = (M^j Y)_{0...b-1}^T for j = 0,...,ns-1 */
+static int make_block_sequences(gr_mat_struct *S, slong ns, const gr_lil_mat_t M, gr_mat_struct Y[2], gr_ctx_t ctx)
+{
+ slong iter, i, b = Y->c;
+ gr_mat_struct W[2];
+ int status = GR_SUCCESS;
+
+ for (i = 0; i < 2; ++i) gr_mat_window_init(&W[i], &Y[i], 0, 0, b, b, ctx);
+ for (i = iter = 0; iter < ns; ++iter, i = 1-i)
+ {
+ if (iter > 0) status |= gr_lil_mat_mul_mat(&Y[i], M, &Y[1-i], ctx);
+ status |= gr_mat_transpose(&S[iter], &W[i], ctx);
+ }
+ for (i = 0; i < 2; ++i) gr_mat_window_clear(&W[i], ctx);
+ return status;
+}
+
+/**
+ * Run Gaussian elimination on the first b columns of the augmented
+ * matrix M = [ D | I ], yielding a final matrix
+ *
+ *                [ Z |     ]
+ *   [ D | I ] -> [---| tau ]
+ *                [ L |     ]
+ *
+ * where the number of nonzero rows in Z is the i-th rank. We choose
+ * the pivot row for a given column to be the one with minimal degree.
+**/
+static int coppersmith_aux_gauss(gr_mat_t M, slong *d, gr_ctx_t ctx)
+{
+ const slong b = M->r/2;
+ slong pr, pc, r, tmp;
+ slong *gamma;
+ gr_ptr cinv, coeff;
+ int status = GR_SUCCESS;
+
+ /* Keep track of viable rows */
+ gamma = flint_malloc(b * sizeof(slong));
+ for (r = 0; r < b; ++r) gamma[r] = 1;
+
+ GR_TMP_INIT2(cinv, coeff, ctx);
+ for (pc = 0; pc < b; ++pc)
+ {
+ /* Set the pivot row to be the minimum degree row incident on column pc */
+ pr = b + pc;
+ for (r = 0; r < b; r++)
+ if (gamma[r] && gr_is_zero(gr_mat_entry_ptr(M, r, pc, ctx), ctx) == T_FALSE && d[r] < d[pr]) pr = r;
+ if (gr_is_zero(gr_mat_entry_ptr(M, pr, pc, ctx), ctx) == T_TRUE) continue;
+
+
+ /* Try to move pivot row to appropriate position (if not already there) */
+ if (pr != b + pc)
+ {
+ tmp = d[pr]; d[pr] = d[b+pc]; d[b+pc] = tmp;
+
+ if (gr_is_zero(gr_mat_entry_ptr(M, b + pc, pr, ctx), ctx) == T_FALSE)
+ status |= gr_mat_swap_rows(M, NULL, pr, b + pc, ctx), pr = b + pc;
+ else /* Need to make new auxiliary vector and remove r from use */
+ status |= _gr_vec_add(M->rows[b + pc], M->rows[b + pc], M->rows[pr], 3*b, ctx), gamma[pr] = 0;
+ }
+ status |= gr_inv(cinv, gr_mat_entry_ptr(M, pr, pc, ctx), ctx);
+
+ /* Do Gaussian elimination on first b rows */
+ for (r = 0; r < b; ++r)
+ if (gamma[r] && gr_is_zero(gr_mat_entry_ptr(M, r, pc, ctx), ctx) == T_FALSE)
+ {
+ status |= gr_mul(coeff, gr_mat_entry_ptr(M, r, pc, ctx), cinv, ctx);
+ status |= gr_neg(coeff, coeff, ctx);
+ status |= _gr_vec_addmul_scalar(
+ M->rows[r], M->rows[pr], M->c,
+ coeff, ctx
+ );
+
+ }
+ }
+ flint_free(gamma);
+ GR_TMP_CLEAR2(cinv, coeff, ctx);
+ return status;
+}
+
+/* Stop with failure if sum(d_0 ... d_{b-1}) < delta */
+/* Stop with success if sum(d_0 ... d_{b-1}) < delta + max(d_0 ... d_{b-1}) - min(d_b ... d_{2b-1}) */
+static int coppersmith_stopping_criterion(slong *d, slong delta, slong b)
+{
+ slong tmp, r;
+
+ /* Sum degrees of generating polynomials */
+ tmp = d[0]; for (r = 1; r < b; ++r) tmp += d[r];
+ delta -= tmp;
+ if (delta < 0) return 0; /* Insufficient degree */
+
+ /* Add maximum degree of first b polys and subtract minimum degree of last b */
+ tmp = d[0]; for (r = 1; r < b; ++r) if (d[r] > tmp) tmp = d[r];
+ delta += tmp;
+ tmp = d[b]; for (r = b + 1; r < 2*b; ++r) if (d[r] < tmp) tmp = d[r];
+ delta -= tmp;
+ return delta < 0 ? 1 : -1;
+}
+
+/**
+ * Generalization of Berlekamp-Massey due to Coppersmith.
+ * Iteratively computes a sequence F representing 2b polynomials:
+ * - the first b are the current (reversed) generating polynomials
+ * - the last b are certain auxiliary polynomials.
+**/
+static int find_block_min_poly(gr_mat_struct *S, slong *d, slong n, slong delta, gr_ctx_t ctx)
+{
+ int ret;
+ slong t;
+ slong i, k, r, b = S->r;
+ slong f_len;
+ gr_mat_struct *F;
+ gr_mat_t M, D, tau, tmp;
+ int status = GR_SUCCESS;
+
+ f_len = 1;
+ F = flint_malloc((n + 1) * sizeof(gr_mat_struct));
+ gr_mat_init(&F[0], 2*b, b, ctx);
+ gr_mat_init(tmp, b, b, ctx);
+ for (i = 0; i < b; ++i)
+ {
+ d[i] = 0;
+ d[b + i] = 1;
+ status |= gr_one(gr_mat_entry_ptr(&F[0], i, i, ctx), ctx);
+ }
+
+ /* [ D | I ] -> [ ? | tau ]*/
+ gr_mat_init(M, 2*b, 3*b, ctx);
+
+ for (t = 0, ret = -1; t < n && ret == -1; ++t)
+ {
+ /* Compute discrepancy matrix and tau */
+ gr_mat_window_init(D, M, 0, 0, 2*b, b, ctx);
+ gr_mat_window_init(tau, M, 0, b, 2*b, 3*b, ctx);
+ status |= gr_mat_zero(D, ctx);
+ for (k = 0; k <= t; ++k) status |= gr_mat_addmul(D, D, &F[k], &S[t-k], ctx);
+ status |= gr_mat_one(tau, ctx);
+ gr_mat_window_clear(D, ctx);
+ gr_mat_window_clear(tau, ctx);
+ status |= coppersmith_aux_gauss(M, d, ctx);
+
+ /* Multiply F by tau * diag(I xI) */
+ gr_mat_window_init(tau, M, 0, b, 2*b, 3*b, ctx); /* Needed since gauss reorders rows */
+ gr_mat_init(&F[f_len++], 2*b, b, ctx);
+ for (k = f_len-1; k > 0; --k)
+ status |= gr_mat_mul(&F[k], tau, &F[k-1], ctx); /* Every row multiplied by x */
+ for (k = 0; k < f_len; ++k)
+ for (r = 0; r < b; ++r) /* Divide first b rows by x */
+ {
+ if (k < f_len - 1)
+ status |= _gr_vec_set(F[k].rows[r], F[k+1].rows[r], b, ctx);
+ else
+ status |= _gr_vec_zero(F[k].rows[r], b, ctx);
+ }
+ for (r = b; r < 2*b; ++r)
+ status |= _gr_vec_zero(F[0].rows[r], b, ctx), d[r] += 1;
+ gr_mat_window_clear(tau, ctx);
+ ret = coppersmith_stopping_criterion(d, delta, b);
+ }
+
+ /* Copy C to S, with each row reversed according to its degree */
+ for (r = 0; r < b; ++r)
+ for (k = 0; k <= d[r]; k++)
+ status |= _gr_vec_set(S[k].rows[r], F[d[r]-k].rows[r], b, ctx);
+
+ for (k = 0; k < f_len; ++k)
+ gr_mat_clear(&F[k], ctx);
+ gr_mat_clear(M, ctx);
+ gr_mat_clear(tmp, ctx);
+ flint_free(F);
+ return status;
+}
+
+static int make_block_sum(gr_ptr x, const gr_mat_struct *S, const slong *d, const gr_lil_mat_t M, gr_mat_struct Z[2], slong l, gr_ctx_t ctx)
+{
+ slong i, iter, b = S->r;
+ slong dd;
+ gr_ptr xi;
+ int status = GR_SUCCESS;
+
+ /* Compute differences between nominal and real degree */
+ dd = 0;
+ while (_gr_vec_is_zero(S[dd].rows[l], b, ctx) == T_TRUE) ++dd;
+
+ /* Simultaneously apply all polynomials in row l to the iterates of M on Z */
+ GR_TMP_INIT_VEC(xi, M->c, ctx);
+ status |= _gr_vec_zero(x, M->c, ctx);
+ for (i = iter = 0; iter <= d[l]; ++iter, i = 1 - i)
+ {
+ if (iter > 0) status |= gr_lil_mat_mul_mat(&Z[i], M, &Z[1-i], ctx);
+ status |= gr_mat_mul_vec(xi, &Z[i], S[dd + iter].rows[l], ctx);
+ status |= _gr_vec_add(x, x, xi, M->c, ctx);
+ }
+ GR_TMP_CLEAR_VEC(xi, M->c, ctx);
+ return status;
+}
+
+int gr_lil_mat_solve_block_wiedemann(gr_ptr x, const gr_lil_mat_t M, gr_srcptr b, slong block_size, flint_rand_t state, gr_ctx_t ctx)
+{
+ int i;
+ slong sz = ctx->sizeof_elem;
+ gr_ptr x1, coeff;
+ gr_lil_mat_t Mb;
+ int status = GR_SUCCESS;
+
+ if (M->r != M->c) return GR_DOMAIN; /* TODO: handle non-square matrices */
+ if (_gr_vec_is_zero(b, M->c, ctx) == T_TRUE)
+ {
+ return _gr_vec_zero(x, M->c, ctx);
+ }
+
+ /* TODO: Precondition M */
+ GR_TMP_INIT(coeff, ctx);
+ GR_TMP_INIT_VEC(x1, M->c + 1, ctx);
+ gr_lil_mat_init(Mb, M->r + 1, M->c + 1, ctx);
+ for (i = 0; i < M->r; ++i)
+ {
+ gr_sparse_vec_fit_nnz(&Mb->rows[i], M->rows[i].nnz + 1, ctx);
+ status |= gr_sparse_vec_set(&Mb->rows[i], &M->rows[i], ctx);
+ status |= gr_sparse_vec_set_entry(&Mb->rows[i], M->c, GR_ENTRY(b, i, sz), ctx);
+ }
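+ /* Mb is the (r+1) x (c+1) augmented matrix [M | b; 0 | 0]: a kernel vector */
+ /* with nonzero last coordinate rescales to a solution of Mx = b below */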
+
+ status |= gr_lil_mat_nullvector_block_wiedemann(x1, Mb, block_size, state, ctx);
+ if (status == GR_SUCCESS && gr_is_zero(GR_ENTRY(x1, M->c, sz), ctx) == T_FALSE)
+ {
+ status |= gr_inv(coeff, GR_ENTRY(x1, M->c, sz), ctx);
+ status |= gr_neg(coeff, coeff, ctx);
+ status |= _gr_vec_mul_scalar(x, x1, M->c, coeff, ctx);
+ }
+ gr_lil_mat_clear(Mb, ctx);
+ GR_TMP_CLEAR_VEC(x1, M->c + 1, ctx);
+ GR_TMP_CLEAR(coeff, ctx);
+ return status;
+}
+
+int gr_lil_mat_nullvector_block_wiedemann(gr_ptr x, const gr_lil_mat_t M, slong block_size, flint_rand_t state, gr_ctx_t ctx)
+{
+ slong l, ns, k;
+ slong *d;
+ gr_ptr b;
+ gr_mat_struct Y[3], *S;
+ int status = GR_SUCCESS;
+ if (M->r != M->c) return GR_DOMAIN; /* TODO: handle non-square matrices */
+
+ ns = 2*M->r/block_size + 3; /* Maybe 5? */
+ S = flint_malloc(ns*sizeof(*S));
+ d = flint_calloc(2*block_size, sizeof(*d));
+ GR_TMP_INIT_VEC(b, M->r, ctx);
+ for (k = 0; k < ns; ++k)
+ gr_mat_init(&S[k], block_size, block_size, ctx);
+ for (l = 0; l < 3; ++l)
+ gr_mat_init(&Y[l], M->c, block_size, ctx);
+ do
+ status |= gr_mat_randtest(&Y[0], state, ctx);
+ while (gr_mat_is_zero(&Y[0], ctx) == T_TRUE);
+
+ status |= gr_lil_mat_mul_mat(&Y[1], M, &Y[0], ctx);
+ status |= make_block_sequences(S, ns, M, &Y[1], ctx);
+ status |= find_block_min_poly(S, d, ns, M->r, ctx);
+
+ for (l = 0; l < block_size; ++l)
+ {
+ status |= gr_mat_set(&Y[1], &Y[0], ctx);
+ status |= make_block_sum(x, S, d, M, Y + 1, l, ctx);
+ status |= gr_lil_mat_mul_vec(b, M, x, ctx);
+ /* Accept x if it is a nonzero vector in the kernel of M */
+ if (_gr_vec_is_zero(x, M->c, ctx) != T_TRUE && _gr_vec_is_zero(b, M->r, ctx) == T_TRUE)
+ break;
+ }
+ if (l == block_size)
+ status |= GR_UNABLE;
+ for (k = 0; k < ns; ++k)
+ gr_mat_clear(&S[k], ctx);
+ for (l = 0; l < 3; ++l)
+ gr_mat_clear(&Y[l], ctx);
+ flint_free(S);
+ flint_free(d);
+ GR_TMP_CLEAR_VEC(b, M->r, ctx);
+ return status;
+}
+
+
diff --git a/src/gr_sparse_mat/solve_lanczos.c b/src/gr_sparse_mat/solve_lanczos.c
new file mode 100644
index 0000000000..aab6a2031f
--- /dev/null
+++ b/src/gr_sparse_mat/solve_lanczos.c
@@ -0,0 +1,145 @@
+/*
+ Copyright (C) 2010 Fredrik Johansson
+ Copyright (C) 2020 Kartik Venkatram
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_sparse_mat.h"
+
+int gr_lil_mat_solve_lanczos(gr_ptr x, const gr_lil_mat_t M, gr_srcptr b, flint_rand_t state, gr_ctx_t ctx)
+{
+ slong j, r, c, iter;
+ gr_lil_mat_t Mt;
+ gr_ptr v[2], Mv, Av, Mtb;
+ gr_ptr vtAv[2], AvtAv, vMtb, tmp;
+ int status = GR_SUCCESS;
+
+ // TODO: handle this
+ if (x == b)
+ return GR_DOMAIN;
+
+ r = gr_sparse_mat_nrows(M, ctx);
+ c = gr_sparse_mat_ncols(M, ctx);
+
+ status |= _gr_vec_zero(x, c, ctx);
+ if (_gr_vec_is_zero(b, r, ctx) == T_TRUE)
+ return status; // Trivial solution works
+
+ /* We assume that M is not symmetric, and work with A = M^t M */
+ gr_lil_mat_init(Mt, c, r, ctx);
+ status |= gr_lil_mat_transpose(Mt, M, ctx);
+
+ /* Construct auxiliary values and vectors */
+ /* Rather than storing the whole sequence of values v_j, we alternate between two vectors */
+ GR_TMP_INIT5(vtAv[0], vtAv[1], AvtAv, vMtb, tmp, ctx);
+ GR_TMP_INIT_VEC(v[0], c, ctx);
+ GR_TMP_INIT_VEC(v[1], c, ctx);
+ GR_TMP_INIT_VEC(Mv, r, ctx);
+ GR_TMP_INIT_VEC(Av, c, ctx);
+ GR_TMP_INIT_VEC(Mtb, c, ctx);
+
+ /* Make 0th vector random (and -1st vector trivial) */
+ status |= gr_lil_mat_mul_vec(Mtb, Mt, b, ctx);
+ status |= _gr_vec_randtest(v[0], state, c, ctx);
+ status |= _gr_vec_zero(v[1], c, ctx);
+ status |= gr_one(vtAv[1], ctx);
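+ /* With v_{-1} = 0 and delta_{-1} = 1, the beta term vanishes on the first iteration */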
+ for (iter = j = 0; ; j = 1-j, ++iter)
+ {
+ /* Compute A v_j and check if it is orthogonal to v_j */
+ status |= gr_lil_mat_mul_vec(Mv, M, v[j], ctx);
+ status |= gr_lil_mat_mul_vec(Av, Mt, Mv, ctx);
+ status |= _gr_vec_dot(vtAv[j], NULL, 0, v[j], Av, c, ctx);
+ if (gr_is_zero(vtAv[j], ctx) == T_TRUE) break; /* Can't make any more progress */
+
+ /* Update putative solution by (<v_j, Mtb>/<v_j, Av_j>) * v_j */
+ status |= _gr_vec_dot(vMtb, NULL, 0, v[j], Mtb, c, ctx);
+ status |= gr_div(vMtb, vMtb, vtAv[j], ctx);
+ status |= _gr_vec_addmul_scalar(x, v[j], c, vMtb, ctx);
+
+ /* v_{j+1} = Av_j - alpha*v_j - beta*v_{j-1}, where */
+ /* alpha = <Av_j, Av_j>/delta_j, and */
+ /* beta = delta_j/delta_{j-1} */
+ status |= _gr_vec_dot(AvtAv, NULL, 0, Av, Av, c, ctx);
+ status |= gr_div(tmp, vtAv[j], vtAv[1-j], ctx);
+ status |= gr_neg(tmp, tmp, ctx);
+ status |= _gr_vec_mul_scalar(v[1-j], v[1-j], c, tmp, ctx);
+
+ status |= gr_div(tmp, AvtAv, vtAv[j], ctx);
+ status |= gr_neg(tmp, tmp, ctx);
+ status |= _gr_vec_addmul_scalar(v[1-j], v[j], c, tmp, ctx);
+ status |= _gr_vec_add(v[1-j], v[1-j], Av, c, ctx);
+ }
+ /* Check result */
+ status |= gr_lil_mat_mul_vec(Mv, M, x, ctx);
+ status |= gr_lil_mat_mul_vec(Av, Mt, Mv, ctx);
+ if (_gr_vec_equal(Av, Mtb, c, ctx) == T_FALSE)
+ status = GR_UNABLE;
+
+ /* Clear auxiliary vectors and transpose */
+ gr_lil_mat_clear(Mt, ctx);
+ GR_TMP_CLEAR5(vtAv[0], vtAv[1], AvtAv, vMtb, tmp, ctx);
+ GR_TMP_CLEAR_VEC(v[0], c, ctx);
+ GR_TMP_CLEAR_VEC(v[1], c, ctx);
+ GR_TMP_CLEAR_VEC(Mv, r, ctx);
+ GR_TMP_CLEAR_VEC(Av, c, ctx);
+ GR_TMP_CLEAR_VEC(Mtb, c, ctx);
+ return status;
+}
+
+int gr_lil_mat_nullvector_lanczos(gr_ptr x, const gr_lil_mat_t M, flint_rand_t state, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ gr_ptr x2, b;
+ GR_TMP_INIT_VEC(x2, M->c, ctx);
+ GR_TMP_INIT_VEC(b, M->r, ctx);
+
+ status |= _gr_vec_randtest(x, state, M->c, ctx);
+ status |= gr_lil_mat_mul_vec(b, M, x, ctx);
+ status |= gr_lil_mat_solve_lanczos(x2, M, b, state, ctx);
+
+ if (status == GR_SUCCESS)
+ {
+ status |= _gr_vec_sub(x, x, x2, M->c, ctx);
+ if (_gr_vec_is_zero(x, M->c, ctx) == T_TRUE)
+ status = GR_TEST_FAIL;
+ else
+ {
+ status |= gr_lil_mat_mul_vec(b, M, x, ctx);
+ if (_gr_vec_is_zero(b, M->r, ctx) == T_FALSE)
+ status = GR_DOMAIN;
+ }
+ }
+ GR_TMP_CLEAR_VEC(x2, M->c, ctx);
+ GR_TMP_CLEAR_VEC(b, M->r, ctx);
+ return status;
+}
diff --git a/src/gr_sparse_mat/solve_wiedemann.c b/src/gr_sparse_mat/solve_wiedemann.c
new file mode 100644
index 0000000000..b69c90f524
--- /dev/null
+++ b/src/gr_sparse_mat/solve_wiedemann.c
@@ -0,0 +1,265 @@
+/*
+ Copyright (C) 2010 Fredrik Johansson
+ Copyright (C) 2020 Kartik Venkatram
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "flint.h"
+#include "gr_sparse_mat.h"
+
+/* Berlekamp - Massey algorithm */
+static int find_min_poly(slong *L, gr_ptr s, slong N, gr_ctx_t ctx)
+{
+ slong m, n, i, sz;
+ slong deg_C, deg_B, deg_T;
+ gr_ptr B, C, T;
+ gr_ptr c, d_C, d_B;
+ int status = GR_SUCCESS;
+
+ sz = ctx->sizeof_elem;
+ GR_TMP_INIT3(c, d_C, d_B, ctx);
+ GR_TMP_INIT_VEC(B, N, ctx);
+ GR_TMP_INIT_VEC(C, N, ctx);
+ GR_TMP_INIT_VEC(T, N, ctx);
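+ /* C is the current connection (minimal) polynomial, B is the previous one */
+ /* (saved when the linear complexity L last changed), and T is scratch */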
+
+ deg_C = 0, deg_B = 0, deg_T = -1;
+ status |= gr_one(d_B, ctx);
+ status |= gr_one(B, ctx);
+ status |= gr_one(C, ctx);
+
+ *L = 0;
+ for (n = 0, m = 1; n < N; n++, m++)
+ {
+ /* d_C = sum_{i = 0}^L C_i * s_{n-i} */
+ status |= gr_set(d_C, GR_ENTRY(s, n, sz), ctx);
+ for (i = 1; i <= *L; i++)
+ status |= gr_addmul(d_C, GR_ENTRY(C, i, sz), GR_ENTRY(s, n-i, sz), ctx);
+ if (gr_is_zero(d_C, ctx) == T_TRUE) continue; /* C and L currently valid */
+
+ /* C(x) = C(x) - (d_C/d_B) x^m B(x); */
+ if (*L <= 2*n)
+ {
+ deg_T = deg_C;
+ status |= _gr_vec_set(T, C, deg_C+1, ctx);
+ }
+ status |= gr_div(c, d_C, d_B, ctx);
+ status |= gr_neg(c, c, ctx);
+ for (i = 0; i <= deg_B; ++i)
+ status |= gr_addmul(GR_ENTRY(C, m+i, sz), GR_ENTRY(B, i, sz), c, ctx);
+
+ deg_C = FLINT_MAX(deg_C, deg_B + m);
+ while (gr_is_zero(GR_ENTRY(C, deg_C, sz), ctx) == T_TRUE)
+ --deg_C; /* Probably unnecessary */
+
+ if (2 * *L <= n) /* Increase number of errors */
+ {
+ *L = n + 1 - *L, m = 0;
+ status |= gr_set(d_B, d_C, ctx);
+ deg_B = deg_T;
+ status |= _gr_vec_set(B, T, deg_T+1, ctx);
+ }
+ }
+ /* Reverse C into s */
+ for (i = 0; i <= *L; ++i)
+ status |= gr_set(GR_ENTRY(s, i, sz), GR_ENTRY(C, *L-i, sz), ctx);
+
+ GR_TMP_CLEAR_VEC(B, N, ctx);
+ GR_TMP_CLEAR_VEC(C, N, ctx);
+ GR_TMP_CLEAR_VEC(T, N, ctx);
+ GR_TMP_CLEAR3(c, d_C, d_B, ctx);
+ return status;
+}
+
+/* Compute s_ij = (M^j b)_i for i = 0,...,ns-1, j = 0,...,len-1 */
+static int make_sequences(gr_ptr *s, slong ns, slong len, const gr_lil_mat_t M, gr_srcptr b, gr_ctx_t ctx)
+{
+ slong i, j, r, sz;
+ gr_ptr y, My;
+ int status = GR_SUCCESS;
+
+ sz = ctx->sizeof_elem;
+ r = gr_sparse_mat_nrows(M, ctx);
+
+ GR_TMP_INIT_VEC(y, r, ctx);
+ GR_TMP_INIT_VEC(My, r, ctx);
+ status |= _gr_vec_set(y, b, r, ctx);
+
+ for (j = 0; j < len; ++j)
+ {
+ if (j > 0)
+ {
+ status |= gr_lil_mat_mul_vec(My, M, y, ctx);
+ status |= _gr_vec_set(y, My, r, ctx);
+ }
+ for (i = 0; i < ns; ++i)
+ status |= gr_set(GR_ENTRY(s[i], j, sz), GR_ENTRY(y, i, sz), ctx);
+ }
+ GR_TMP_CLEAR_VEC(y, r, ctx);
+ GR_TMP_CLEAR_VEC(My, r, ctx);
+ return status;
+}
+
+/* Compute x = \sum_{i = 0}^{L-1} s_i * M^i * b */
+static int make_sum(gr_ptr x, gr_ptr s, slong L, const gr_lil_mat_t M, gr_srcptr b, gr_ctx_t ctx)
+{
+ slong i, r, sz;
+ gr_ptr y, My;
+ int status;
+
+ sz = ctx->sizeof_elem;
+ r = gr_sparse_mat_nrows(M, ctx);
+
+ GR_TMP_INIT_VEC(y, r, ctx);
+ GR_TMP_INIT_VEC(My, r, ctx);
+ status = _gr_vec_set(y, b, r, ctx);
+ status |= _gr_vec_mul_scalar(x, b, r, s, ctx);
+ for (i = 1; i < L; ++i)
+ {
+ status |= gr_lil_mat_mul_vec(My, M, y, ctx);
+ status |= _gr_vec_set(y, My, r, ctx);
+ status |= _gr_vec_addmul_scalar(x, y, r, GR_ENTRY(s, i, sz), ctx);
+ }
+ GR_TMP_CLEAR_VEC(y, r, ctx);
+ GR_TMP_CLEAR_VEC(My, r, ctx);
+ return status;
+}
+
+int gr_lil_mat_solve_wiedemann(gr_ptr x, const gr_lil_mat_t M, gr_srcptr b, gr_ctx_t ctx)
+{
+ slong i, r, c, L, ns, len, sz;
+ gr_ptr *s;
+ gr_ptr Mx, coeff;
+ int status = GR_SUCCESS;
+
+ // TODO: handle this
+ if (x == b)
+ return GR_DOMAIN;
+ /* TBD: reduce to square */
+ if (M->r != M->c)
+ return GR_DOMAIN;
+
+ sz = ctx->sizeof_elem;
+ r = gr_sparse_mat_nrows(M, ctx);
+ c = gr_sparse_mat_ncols(M, ctx);
+
+ if (_gr_vec_is_zero(b, c, ctx) == T_TRUE)
+ {
+ return _gr_vec_zero(x, c, ctx);
+ }
+
+ GR_TMP_INIT_VEC(Mx, r, ctx);
+ GR_TMP_INIT(coeff, ctx);
+
+ // Get dimension of sequence to solve
+ ns = FLINT_MIN(r, 2);
+ len = 2 * r + 1;
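+ /* 2r + 1 terms of the sequence suffice to recover a linear recurrence of order at most r */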
+ s = flint_malloc(ns * sizeof(gr_ptr));
+ for (i = 0; i < ns; ++i)
+ GR_TMP_INIT_VEC(s[i], len, ctx);
+
+ status |= make_sequences(s, ns, len, M, b, ctx);
+
+ /* Don't have block Berlekamp yet, just try each one */
+ for (i = 0; i < ns; ++i)
+ {
+ /* Get minimal polynomial */
+ status |= find_min_poly(&L, s[i], len, ctx);
+ if (gr_is_zero(s[i], ctx) == T_TRUE) continue;
+
+ /* \sum_{j = 0}^L s_ij M^j b = 0 */
+ /* => x = -(1/s_i0) \sum_{j = 0}^{L-1} s_i(j+1) M^j b solves Mx = b */
+ status |= make_sum(x, GR_ENTRY(s[i], 1, sz), L, M, b, ctx);
+ status |= gr_inv(coeff, s[i], ctx);
+ status |= gr_neg(coeff, coeff, ctx);
+ status |= _gr_vec_mul_scalar(x, x, r, coeff, ctx);
+
+ /* Check if successful */
+ status |= gr_lil_mat_mul_vec(Mx, M, x, ctx);
+ if (_gr_vec_equal(Mx, b, r, ctx) != T_FALSE)
+ break;
+ }
+ if (i == ns)
+ status |= GR_UNABLE;
+
+ GR_TMP_CLEAR_VEC(Mx, r, ctx);
+ GR_TMP_CLEAR(coeff, ctx);
+
+ for (i = 0; i < ns; ++i)
+ GR_TMP_CLEAR_VEC(s[i], len, ctx);
+ flint_free(s);
+ return status;
+}
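+
+/*
+ * Usage sketch (illustrative only; the context and bindings here are
+ * assumptions, not part of this file). Wiedemann divides by the constant
+ * sequence coefficient, so an exact field such as a small prime field is
+ * the natural setting:
+ *
+ *     gr_ctx_t ctx;
+ *     gr_ctx_init_nmod(ctx, 65521);
+ *     // ...build a square gr_lil_mat_t A and right-hand side b...
+ *     int status = gr_lil_mat_solve_wiedemann(x, A, b, ctx);
+ *     // GR_SUCCESS: Ax = b was verified; GR_UNABLE is OR-ed in when no
+ *     // tracked sequence certified a solution.
+ */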
+
+int gr_lil_mat_nullvector_wiedemann(gr_ptr x, const gr_lil_mat_t M, flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i, L, r, c, ns, len;
+ gr_ptr *s;
+ gr_ptr Mx, b;
+ int status = GR_SUCCESS;
+
+ // TODO: handle this
+ if (M->r != M->c)
+ return GR_DOMAIN;
+
+ r = gr_sparse_mat_nrows(M, ctx);
+ c = gr_sparse_mat_ncols(M, ctx);
+
+ GR_TMP_INIT_VEC(Mx, r, ctx);
+ GR_TMP_INIT_VEC(b, r, ctx);
+
+ // Get dimension of sequence to solve
+ ns = FLINT_MIN(r, 2);
+ len = 2 * r + 1;
+ s = flint_malloc(ns * sizeof(gr_ptr));
+ for (i = 0; i < ns; ++i)
+ GR_TMP_INIT_VEC(s[i], len, ctx);
+
+ status |= _gr_vec_randtest(x, state, r, ctx);
+ status |= gr_lil_mat_mul_vec(b, M, x, ctx);
+
+ status |= make_sequences(s, ns, len, M, b, ctx);
+
+ for (i = 0; i < ns; ++i)
+ {
+ /* Get minimal polynomial */
+ status |= find_min_poly(&L, s[i], len, ctx);
+
+ /* \sum_{j = 0}^L s_ijM^jb = 0 */
+ /* => x = \sum_{j = 0}^L s_ijM^jx solves Mx = 0 */
+ status |= make_sum(x, s[i], L+1, M, x, ctx);
+ status |= gr_lil_mat_mul_vec(Mx, M, x, ctx);
+ if
+ (
+            _gr_vec_is_zero(x, c, ctx) != T_TRUE &&
+ _gr_vec_is_zero(Mx, r, ctx) != T_FALSE
+ )
+ break;
+ }
+ if (i == ns)
+ status |= GR_UNABLE;
+
+ GR_TMP_CLEAR_VEC(Mx, r, ctx);
+ GR_TMP_CLEAR_VEC(b, r, ctx);
+ for (i = 0; i < ns; ++i)
+ GR_TMP_CLEAR_VEC(s[i], len, ctx);
+ flint_free(s);
+ return status;
+}
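+
+/*
+ * Note (added for exposition): this is a Monte Carlo null-vector search.
+ * Starting from a random x with b = Mx, an annihilating relation
+ * \sum_j s_j M^j b = 0 turns x' = \sum_{j = 0}^{L} s_j M^j x into a kernel
+ * vector, since Mx' = \sum_j s_j M^j b = 0; the loop retries each tracked
+ * coordinate sequence until a nonzero kernel vector is certified.
+ */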
diff --git a/src/gr_sparse_mat/swap_rows.c b/src/gr_sparse_mat/swap_rows.c
new file mode 100644
index 0000000000..91a7500fbc
--- /dev/null
+++ b/src/gr_sparse_mat/swap_rows.c
@@ -0,0 +1,35 @@
+/*
+ Copyright (C) 2022 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_sparse_mat.h"
+
+int gr_lil_mat_swap_rows(gr_lil_mat_t mat, slong * perm, slong r, slong s, gr_ctx_t ctx)
+{
+    if (r < 0 || s < 0 || r >= mat->r || s >= mat->r)
+    {
+        return GR_DOMAIN;
+    }
+    if (r == s)
+    {
+        return GR_SUCCESS;
+    }
+
+ if (perm != NULL)
+ FLINT_SWAP(slong, perm[r], perm[s]);
+
+ if (mat->rows[r].nnz != 0 || mat->rows[s].nnz != 0)
+ {
+ gr_sparse_vec_swap(&mat->rows[r], &mat->rows[s], ctx);
+ }
+
+ return GR_SUCCESS;
+}
diff --git a/src/gr_sparse_mat/test/main.c b/src/gr_sparse_mat/test/main.c
new file mode 100644
index 0000000000..71bde04c9c
--- /dev/null
+++ b/src/gr_sparse_mat/test/main.c
@@ -0,0 +1,31 @@
+#include <string.h>
+#include <stdlib.h>
+
+/* Include functions *********************************************************/
+
+#include "t-init.c"
+#include "t-randtest.c"
+#include "t-conversion.c"
+#include "t-arith.c"
+#include "t-mul.c"
+#include "t-lu.c"
+#include "t-rref.c"
+#include "t-solve.c"
+
+/* Array of test functions ***************************************************/
+
+test_struct tests[] =
+{
+ TEST_FUNCTION(gr_sparse_mat_init),
+ TEST_FUNCTION(gr_sparse_mat_conversion),
+ TEST_FUNCTION(gr_sparse_mat_randtest),
+ TEST_FUNCTION(gr_sparse_mat_arith),
+ TEST_FUNCTION(gr_sparse_mat_mul),
+ TEST_FUNCTION(gr_sparse_mat_rref),
+ TEST_FUNCTION(gr_sparse_mat_lu),
+ TEST_FUNCTION(gr_sparse_mat_solve),
+};
+
+/* main function *************************************************************/
+
+TEST_MAIN(tests)
diff --git a/src/gr_sparse_mat/test/t-arith.c b/src/gr_sparse_mat/test/t-arith.c
new file mode 100644
index 0000000000..2c5baec10b
--- /dev/null
+++ b/src/gr_sparse_mat/test/t-arith.c
@@ -0,0 +1,473 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "test_helpers.h"
+#include "gr_sparse_mat.h"
+#include "fmpz.h"
+#include "fmpq.h"
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int test_neg(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i;
+ slong M = 20;
+ slong N = 20;
+ slong n_tests = 20;
+ int status = GR_SUCCESS;
+ gr_csr_mat_t csr_mat, csr_mat2;
+ gr_lil_mat_t lil_mat, lil_mat2;
+ gr_coo_mat_t coo_mat, coo_mat2;
+ gr_mat_t dmat, dmat2;
+
+ gr_mat_init(dmat, M, N, ctx);
+ gr_mat_init(dmat2, M, N, ctx);
+ gr_coo_mat_init(coo_mat, M, N, ctx);
+ gr_coo_mat_init(coo_mat2, M, N, ctx);
+ gr_csr_mat_init(csr_mat, M, N, ctx);
+ gr_csr_mat_init(csr_mat2, M, N, ctx);
+ gr_lil_mat_init(lil_mat, M, N, ctx);
+ gr_lil_mat_init(lil_mat2, M, N, ctx);
+
+ //flint_printf("Testing neg coo\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_coo_mat_randtest(coo_mat, 80, 0, T_TRUE, state, ctx);
+ status |= gr_coo_mat_neg(coo_mat2, coo_mat, ctx);
+ status |= gr_mat_set_coo_mat(dmat2, coo_mat, ctx);
+ status |= gr_mat_neg(dmat, dmat2, ctx);
+ status |= gr_mat_set_coo_mat(dmat2, coo_mat2, ctx);
+ if (gr_mat_equal(dmat, dmat2, ctx) == T_FALSE)
+ {
+ status |= gr_coo_mat_print_nz(coo_mat, ctx); flint_printf("\n");
+ status |= gr_coo_mat_print_nz(coo_mat2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ //flint_printf("Testing neg csr\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_coo_mat_randtest(coo_mat, 80, 0, T_TRUE, state, ctx);
+ status |= gr_csr_mat_set_coo_mat(csr_mat, coo_mat, ctx);
+ status |= gr_csr_mat_neg(csr_mat2, csr_mat, ctx);
+ status |= gr_mat_set_csr_mat(dmat2, csr_mat, ctx);
+ status |= gr_mat_neg(dmat, dmat2, ctx);
+ status |= gr_mat_set_csr_mat(dmat2, csr_mat2, ctx);
+ if (gr_mat_equal(dmat, dmat2, ctx) == T_FALSE)
+ {
+ status |= gr_csr_mat_print_nz(csr_mat, ctx); flint_printf("\n");
+ status |= gr_csr_mat_print_nz(csr_mat2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ //flint_printf("Testing neg lil\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_coo_mat_randtest(coo_mat, 80, 0, T_TRUE, state, ctx);
+ status |= gr_lil_mat_set_coo_mat(lil_mat, coo_mat, ctx);
+ status |= gr_lil_mat_neg(lil_mat2, lil_mat, ctx);
+ status |= gr_mat_set_lil_mat(dmat2, lil_mat, ctx);
+ status |= gr_mat_neg(dmat, dmat2, ctx);
+ status |= gr_mat_set_lil_mat(dmat2, lil_mat2, ctx);
+ if (gr_mat_equal(dmat, dmat2, ctx) == T_FALSE)
+ {
+ status |= gr_lil_mat_print_nz(lil_mat, ctx); flint_printf("\n");
+ status |= gr_lil_mat_print_nz(lil_mat2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ gr_csr_mat_clear(csr_mat, ctx);
+ gr_csr_mat_clear(csr_mat2, ctx);
+ gr_lil_mat_clear(lil_mat, ctx);
+ gr_lil_mat_clear(lil_mat2, ctx);
+ gr_coo_mat_clear(coo_mat, ctx);
+ gr_coo_mat_clear(coo_mat2, ctx);
+ gr_mat_clear(dmat, ctx);
+ gr_mat_clear(dmat2, ctx);
+ return status;
+}
+
+int test_add_sub(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i, j;
+ slong M = 20;
+ slong N = 20;
+ slong n_tests = 20;
+ int status = GR_SUCCESS;
+ truth_t eq;
+ gr_csr_mat_t csr_mat, csr_mat2;
+ gr_lil_mat_t lil_mat, lil_mat2;
+ gr_coo_mat_t coo_mat, coo_mat2;
+ gr_mat_t dmat, dmat2;
+
+ gr_mat_init(dmat, M, N, ctx);
+ gr_mat_init(dmat2, M, N, ctx);
+ gr_coo_mat_init(coo_mat, M, N, ctx);
+ gr_coo_mat_init(coo_mat2, M, N, ctx);
+ gr_csr_mat_init(csr_mat, M, N, ctx);
+ gr_csr_mat_init(csr_mat2, M, N, ctx);
+ gr_lil_mat_init(lil_mat, M, N, ctx);
+ gr_lil_mat_init(lil_mat2, M, N, ctx);
+
+ //flint_printf("Testing add sub\n");
+ for (i = 0; i < 2 * n_tests; i++)
+ {
+ status = GR_SUCCESS;
+ //flint_printf("%d\n", i);
+ j = i % 2; // Add or subtract
+
+ status |= gr_coo_mat_randtest(coo_mat, 80, 0, T_TRUE, state, ctx);
+ status |= gr_coo_mat_randtest(coo_mat2, 80, 0, T_TRUE, state, ctx);
+ status |= gr_lil_mat_set_coo_mat(lil_mat, coo_mat, ctx);
+ status |= gr_lil_mat_set_coo_mat(lil_mat2, coo_mat2, ctx);
+ status |= gr_mat_set_lil_mat(dmat, lil_mat, ctx);
+ status |= gr_mat_set_lil_mat(dmat2, lil_mat2, ctx);
+ if (j == 0)
+ {
+ status |= gr_lil_mat_add(lil_mat, lil_mat, lil_mat2, ctx);
+ status |= gr_mat_add(dmat, dmat, dmat2, ctx);
+ }
+ else
+ {
+ status |= gr_lil_mat_sub(lil_mat, lil_mat, lil_mat2, ctx);
+ status |= gr_mat_sub(dmat, dmat, dmat2, ctx);
+ }
+
+ if (status == GR_UNABLE)
+ continue;
+
+ status |= gr_mat_set_lil_mat(dmat2, lil_mat, ctx);
+ eq = gr_mat_equal(dmat, dmat2, ctx);
+ if (eq == T_FALSE || status != GR_SUCCESS)
+ {
+ flint_printf(
+ "\ni = %d, j = %d, equal = %d, status = %d\n",
+ i, j, eq, status
+ );
+ gr_ctx_println(ctx);
+ flint_printf("mat = "); gr_mat_print(dmat, ctx); flint_printf("\n");
+ flint_printf("mat2 = "); gr_mat_print(dmat2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ gr_csr_mat_clear(csr_mat, ctx);
+ gr_csr_mat_clear(csr_mat2, ctx);
+ gr_lil_mat_clear(lil_mat, ctx);
+ gr_lil_mat_clear(lil_mat2, ctx);
+ gr_coo_mat_clear(coo_mat, ctx);
+ gr_coo_mat_clear(coo_mat2, ctx);
+ gr_mat_clear(dmat, ctx);
+ gr_mat_clear(dmat2, ctx);
+ return status;
+}
+
+int test_accum_mul_scalar(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i, j;
+ slong M = 20;
+ slong N = 20;
+ slong n_tests = 20;
+ int status = GR_SUCCESS;
+ truth_t eq;
+ gr_lil_mat_t lil_mat, lil_mat2;
+ gr_coo_mat_t coo_mat, coo_mat2;
+ gr_mat_t dmat, dmat2;
+ gr_ptr gr_c;
+
+ GR_TMP_INIT(gr_c, ctx);
+ gr_mat_init(dmat, M, N, ctx);
+ gr_mat_init(dmat2, M, N, ctx);
+ gr_coo_mat_init(coo_mat, M, N, ctx);
+ gr_coo_mat_init(coo_mat2, M, N, ctx);
+ gr_lil_mat_init(lil_mat, M, N, ctx);
+ gr_lil_mat_init(lil_mat2, M, N, ctx);
+
+ //flint_printf("Testing addmul submul\n");
+ for (i = 0; i < 2 * n_tests; i++)
+ {
+ status = GR_SUCCESS;
+ //flint_printf("%d\n", i);
+ j = i % 2; // Add or subtract
+
+ status |= gr_coo_mat_randtest(coo_mat, 80, 0, T_TRUE, state, ctx);
+ status |= gr_coo_mat_randtest(coo_mat2, 80, 0, T_TRUE, state, ctx);
+ status |= gr_lil_mat_set_coo_mat(lil_mat, coo_mat, ctx);
+ status |= gr_lil_mat_set_coo_mat(lil_mat2, coo_mat2, ctx);
+ status |= gr_mat_set_lil_mat(dmat, lil_mat, ctx);
+ status |= gr_mat_set_lil_mat(dmat2, lil_mat2, ctx);
+
+ status |= gr_randtest_not_zero(gr_c, state, ctx);
+ if (j == 0)
+ {
+ status |= gr_lil_mat_addmul_scalar(lil_mat, lil_mat2, gr_c, ctx);
+ status |= gr_mat_addmul_scalar(dmat, dmat2, gr_c, ctx);
+ }
+ else
+ {
+ status |= gr_lil_mat_submul_scalar(lil_mat, lil_mat2, gr_c, ctx);
+ status |= gr_mat_submul_scalar(dmat, dmat2, gr_c, ctx);
+ }
+ status |= gr_mat_set_lil_mat(dmat2, lil_mat, ctx);
+ eq = gr_mat_equal(dmat, dmat2, ctx);
+ if (eq == T_FALSE || status != GR_SUCCESS)
+ {
+ flint_printf(
+ "\ni = %d, j = %d, equal = %d, status = %d\n",
+ i, j, eq, status
+ );
+ gr_ctx_println(ctx);
+ flint_printf("dmat = "); gr_mat_print(dmat, ctx); flint_printf("\n");
+ flint_printf("dmat2 = "); gr_mat_print(dmat2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ gr_lil_mat_clear(lil_mat, ctx);
+ gr_lil_mat_clear(lil_mat2, ctx);
+ gr_coo_mat_clear(coo_mat, ctx);
+ gr_coo_mat_clear(coo_mat2, ctx);
+ gr_mat_clear(dmat, ctx);
+ gr_mat_clear(dmat2, ctx);
+ GR_TMP_CLEAR(gr_c, ctx);
+ return status;
+}
+
+#define TEST_OP_SCALAR(STATUS, MAT_TYPE, OP, SCALAR_TYPE, MAT, MAT2, DMAT, DMAT2, C, CTX) \
+ STATUS |= gr_##MAT_TYPE##_mat_##OP##_##SCALAR_TYPE(MAT2, MAT, C, CTX); \
+ STATUS |= gr_mat_##OP##_##SCALAR_TYPE(DMAT2, DMAT, C, CTX); \
+
+
+#define TEST_MUL_SCALAR(STATUS, K, MAT_TYPE, SCALAR_TYPE, MAT, MAT2, DMAT, DMAT2, C, CTX) { \
+ if (K == 1) \
+ { TEST_OP_SCALAR(STATUS, MAT_TYPE, div, SCALAR_TYPE, MAT, MAT2, DMAT, DMAT2, C, CTX) } \
+ else \
+ { \
+ TEST_OP_SCALAR(STATUS, MAT_TYPE, mul, SCALAR_TYPE, MAT, MAT2, DMAT, DMAT2, C, CTX) \
+ if (K == 2) \
+ { TEST_OP_SCALAR(STATUS, MAT_TYPE, divexact, SCALAR_TYPE, MAT2, MAT2, DMAT2, DMAT2, C, CTX) } \
+ } \
+}
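+
+/*
+ * For example (expansion shown for exposition), with K == 0 and a LIL
+ * matrix,
+ *
+ *     TEST_MUL_SCALAR(status, 0, lil, scalar, mat, mat2, dmat, dmat2, c, ctx)
+ *
+ * expands to the pair
+ *
+ *     status |= gr_lil_mat_mul_scalar(mat2, mat, c, ctx);
+ *     status |= gr_mat_mul_scalar(dmat2, dmat, c, ctx);
+ *
+ * so each sparse result can be checked against the dense reference.
+ */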
+
+int test_mul_div_scalar(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i, j, k, l;
+ slong M = 20;
+ slong N = 20;
+ slong n_tests = 20;
+ int status = GR_SUCCESS;
+ gr_csr_mat_t csr_mat, csr_mat2;
+ gr_lil_mat_t lil_mat, lil_mat2;
+ gr_coo_mat_t coo_mat, coo_mat2;
+ gr_mat_t dmat, dmat2;
+ slong c;
+ ulong uc;
+ gr_ptr gr_c;
+ fmpz_t fmpz_c;
+ fmpq_t fmpq_c;
+ truth_t eq;
+
+ fmpz_init(fmpz_c);
+ fmpq_init(fmpq_c);
+
+ GR_TMP_INIT(gr_c, ctx);
+ gr_mat_init(dmat, M, N, ctx);
+ gr_mat_init(dmat2, M, N, ctx);
+ gr_coo_mat_init(coo_mat, M, N, ctx);
+ gr_coo_mat_init(coo_mat2, M, N, ctx);
+ gr_csr_mat_init(csr_mat, M, N, ctx);
+ gr_csr_mat_init(csr_mat2, M, N, ctx);
+ gr_lil_mat_init(lil_mat, M, N, ctx);
+ gr_lil_mat_init(lil_mat2, M, N, ctx);
+
+ //flint_printf("Testing mul div scalar\n");
+ for (i = 0; i < 54 * n_tests; i++)
+ {
+ j = i % 6; // Which type of scalar
+ k = (i / 6) % 3; // Mul, div, or mul + divexact
+ l = (i / 18) % 3; // CSR, LIL, or COO mat
+ //flint_printf("\nTesting (%d, %d, %d)\n", l, k, j);
+ if ((j == 4 || k == 1) && gr_ctx_is_field(ctx) != T_TRUE)
+ continue;
+ if (k == 2 && (gr_ctx_is_exact(ctx) != T_TRUE || gr_ctx_is_integral_domain(ctx) != T_TRUE))
+ continue;
+ status |= gr_coo_mat_randtest(coo_mat, 80, 0, T_TRUE, state, ctx);
+ status |= gr_mat_set_coo_mat(dmat, coo_mat, ctx);
+ if (l == 0)
+ status |= gr_csr_mat_set_coo_mat(csr_mat, coo_mat, ctx);
+ else if (l == 1)
+ status |= gr_lil_mat_set_coo_mat(lil_mat, coo_mat, ctx);
+
+ //flint_printf("\nmat = "); status |= gr_coo_mat_print_nz(coo_mat, ctx); flint_printf("\n");
+ //flint_printf("\nmat = "); status |= gr_mat_print(dmat, ctx); flint_printf("\n");
+ switch(j)
+ {
+ case 0:
+ //flint_printf("Testing scalar\n");
+ status |= gr_randtest_not_zero(gr_c, state, ctx);
+ if (l == 0)
+ TEST_MUL_SCALAR(status, k, csr, scalar, csr_mat, csr_mat2, dmat, dmat2, gr_c, ctx)
+ else if (l == 1)
+ TEST_MUL_SCALAR(status, k, lil, scalar, lil_mat, lil_mat2, dmat, dmat2, gr_c, ctx)
+ else
+ TEST_MUL_SCALAR(status, k, coo, scalar, coo_mat, coo_mat2, dmat, dmat2, gr_c, ctx)
+ break;
+ case 1:
+ c = n_randint(state, 0);
+ //flint_printf("Testing scalar_si = %ld\n", c);
+ if (l == 0)
+ TEST_MUL_SCALAR(status, k, csr, scalar_si, csr_mat, csr_mat2, dmat, dmat2, c, ctx)
+ else if (l == 1)
+ TEST_MUL_SCALAR(status, k, lil, scalar_si, lil_mat, lil_mat2, dmat, dmat2, c, ctx)
+ else
+ TEST_MUL_SCALAR(status, k, coo, scalar_si, coo_mat, coo_mat2, dmat, dmat2, c, ctx)
+ break;
+ case 2:
+ //flint_printf("Testing scalar_ui\n");
+ uc = n_randint(state, 0);
+ if (l == 0)
+ TEST_MUL_SCALAR(status, k, csr, scalar_ui, csr_mat, csr_mat2, dmat, dmat2, uc, ctx)
+ else if (l == 1)
+ TEST_MUL_SCALAR(status, k, lil, scalar_ui, lil_mat, lil_mat2, dmat, dmat2, uc, ctx)
+ else
+ TEST_MUL_SCALAR(status, k, coo, scalar_ui, coo_mat, coo_mat2, dmat, dmat2, uc, ctx)
+ break;
+ case 3:
+ //flint_printf("Testing scalar_fmpz\n");
+ fmpz_randtest_not_zero(fmpz_c, state, 32);
+ if (l == 0)
+ TEST_MUL_SCALAR(status, k, csr, scalar_fmpz, csr_mat, csr_mat2, dmat, dmat2, fmpz_c, ctx)
+ else if (l == 1)
+ TEST_MUL_SCALAR(status, k, lil, scalar_fmpz, lil_mat, lil_mat2, dmat, dmat2, fmpz_c, ctx)
+ else
+ TEST_MUL_SCALAR(status, k, coo, scalar_fmpz, coo_mat, coo_mat2, dmat, dmat2, fmpz_c, ctx)
+ break;
+ case 4:
+ //flint_printf("Testing scalar_fmpq\n");
+ fmpq_randtest_not_zero(fmpq_c, state, 32);
+ if (l == 0)
+ TEST_MUL_SCALAR(status, k, csr, scalar_fmpq, csr_mat, csr_mat2, dmat, dmat2, fmpq_c, ctx)
+ else if (l == 1)
+ TEST_MUL_SCALAR(status, k, lil, scalar_fmpq, lil_mat, lil_mat2, dmat, dmat2, fmpq_c, ctx)
+ else
+ TEST_MUL_SCALAR(status, k, coo, scalar_fmpq, coo_mat, coo_mat2, dmat, dmat2, fmpq_c, ctx)
+ break;
+ case 5:
+ //flint_printf("Testing scalar_2exp_si\n");
+ // Scaling by 2^c always done with multiply, even if it is a divide
+ c = n_randint(state, 32) + 1;
+ if (k == 0 || k == 2)
+ {
+ if (l == 0)
+ { TEST_OP_SCALAR(status, csr, mul, scalar_2exp_si, csr_mat, csr_mat2, dmat, dmat2, c, ctx) }
+ else if (l == 1)
+ { TEST_OP_SCALAR(status, lil, mul, scalar_2exp_si, lil_mat, lil_mat2, dmat, dmat2, c, ctx) }
+ else
+ { TEST_OP_SCALAR(status, coo, mul, scalar_2exp_si, coo_mat, coo_mat2, dmat, dmat2, c, ctx) }
+ }
+ if (k == 1 || k == 2)
+ {
+ if (l == 0)
+ { TEST_OP_SCALAR(status, csr, mul, scalar_2exp_si, csr_mat, csr_mat2, dmat, dmat2, -c, ctx) }
+ else if (l == 1)
+ { TEST_OP_SCALAR(status, lil, mul, scalar_2exp_si, lil_mat, lil_mat2, dmat, dmat2, -c, ctx) }
+ else
+ { TEST_OP_SCALAR(status, coo, mul, scalar_2exp_si, coo_mat, coo_mat2, dmat, dmat2, -c, ctx) }
+ }
+ break;
+ }
+
+ // If any operation not allowed, just skip test
+ if (status == GR_UNABLE || status == GR_DOMAIN) // TODO: FIXME
+ {
+ status = GR_SUCCESS;
+ continue;
+ }
+ //gr_sparse_vec_print_nz(vec, ctx); flint_printf("\n");
+ //gr_sparse_vec_print_nz(vec2, ctx); flint_printf("\n");
+
+ if (l == 0)
+ status |= gr_mat_set_csr_mat(dmat, csr_mat2, ctx);
+ else if (l == 1)
+ status |= gr_mat_set_lil_mat(dmat, lil_mat2, ctx);
+ else
+ status |= gr_mat_set_coo_mat(dmat, coo_mat2, ctx);
+ eq = gr_mat_equal(dmat, dmat2, ctx);
+ if (eq == T_FALSE || status != GR_SUCCESS)
+ {
+ flint_printf(
+ "j = %d, k = %d, equal = %d, status = %d\n",
+ j, k, eq, status
+ );
+ gr_ctx_println(ctx);
+ flint_printf("\n\ndmat: "); status |= gr_mat_print(dmat, ctx); flint_printf("\n");
+ flint_printf("\n\ndmat2: "); status |= gr_mat_print(dmat2, ctx); flint_printf("\n");
+ if (l == 0)
+ {
+ status |= gr_csr_mat_set_mat(csr_mat, dmat2, ctx);
+ status |= gr_csr_mat_print_nz(csr_mat, ctx); flint_printf("\n");
+ status |= gr_csr_mat_print_nz(csr_mat2, ctx); flint_printf("\n");
+ }
+ else if (l == 1)
+ {
+ status |= gr_lil_mat_set_mat(lil_mat, dmat2, ctx);
+ status |= gr_lil_mat_print_nz(lil_mat, ctx); flint_printf("\n");
+ status |= gr_lil_mat_print_nz(lil_mat2, ctx); flint_printf("\n");
+ }
+ else
+ {
+ status |= gr_coo_mat_set_mat(coo_mat, dmat2, ctx);
+ status |= gr_coo_mat_print_nz(coo_mat, ctx); flint_printf("\n");
+ status |= gr_coo_mat_print_nz(coo_mat2, ctx); flint_printf("\n");
+ }
+ return GR_TEST_FAIL;
+ }
+ }
+
+ fmpz_clear(fmpz_c);
+ fmpq_clear(fmpq_c);
+ gr_csr_mat_clear(csr_mat, ctx);
+ gr_csr_mat_clear(csr_mat2, ctx);
+ gr_lil_mat_clear(lil_mat, ctx);
+ gr_lil_mat_clear(lil_mat2, ctx);
+ gr_coo_mat_clear(coo_mat, ctx);
+ gr_coo_mat_clear(coo_mat2, ctx);
+ gr_mat_clear(dmat, ctx);
+ gr_mat_clear(dmat2, ctx);
+ return status;
+}
+
+TEST_FUNCTION_START(gr_sparse_mat_arith, state)
+{
+ slong i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 16; ++i)
+ {
+ while (1)
+ {
+ //gr_ctx_init_fmpz(ctx);
+ gr_ctx_init_random(ctx, state);
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ CHECK_TEST(test_neg(state, ctx), "Sparse matrix negation");
+        CHECK_TEST(test_add_sub(state, ctx), "Sparse matrix addition and subtraction");
+ CHECK_TEST(test_accum_mul_scalar(state, ctx), "Sparse matrix scalar addmul and submul");
+ CHECK_TEST(test_mul_div_scalar(state, ctx), "Sparse matrix scalar multiplication and division");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
diff --git a/src/gr_sparse_mat/test/t-conversion.c b/src/gr_sparse_mat/test/t-conversion.c
new file mode 100644
index 0000000000..3e3fb772fe
--- /dev/null
+++ b/src/gr_sparse_mat/test/t-conversion.c
@@ -0,0 +1,168 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "test_helpers.h"
+#include "gr_sparse_mat.h"
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int test_conversion(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i;
+ slong M = 20;
+ slong N = 20;
+ slong n_tests = 20;
+ int status = GR_SUCCESS;
+ gr_coo_mat_t coo_mat, coo_mat2;
+ gr_csr_mat_t csr_mat, csr_mat2;
+ gr_lil_mat_t lil_mat, lil_mat2;
+ gr_mat_t dmat, dmat2;
+
+ gr_mat_init(dmat, M, N, ctx);
+ gr_mat_init(dmat2, M, N, ctx);
+ gr_coo_mat_init(coo_mat, M, N, ctx);
+ gr_coo_mat_init(coo_mat2, M, N, ctx);
+ gr_csr_mat_init(csr_mat, M, N, ctx);
+ gr_csr_mat_init(csr_mat2, M, N, ctx);
+ gr_lil_mat_init(lil_mat, M, N, ctx);
+ gr_lil_mat_init(lil_mat2, M, N, ctx);
+
+ //flint_printf("Testing copy\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_coo_mat_randtest(coo_mat, 40, 0, T_TRUE, state, ctx);
+ status |= gr_coo_mat_set(coo_mat2, coo_mat, ctx);
+ if (gr_coo_mat_equal(coo_mat, coo_mat2, ctx) == T_FALSE)
+ {
+ status |= gr_coo_mat_print_nz(coo_mat, ctx); flint_printf("\n");
+ status |= gr_coo_mat_print_nz(coo_mat2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ //flint_printf("Testing from/to dense mat\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_mat_randtest(dmat, state, ctx);
+ status |= gr_coo_mat_set_mat(coo_mat, dmat, ctx);
+ status |= gr_mat_set_coo_mat(dmat2, coo_mat, ctx);
+ if (T_FALSE == gr_mat_equal(dmat, dmat2, ctx))
+ {
+ status |= gr_mat_print(dmat, ctx); flint_printf("\n");
+ status |= gr_mat_print(dmat2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ // flint_printf("Testing from/to sparse mat\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_coo_mat_randtest(coo_mat, 10, 0, T_TRUE, state, ctx);
+ status |= gr_mat_set_coo_mat(dmat, coo_mat, ctx);
+ status |= gr_coo_mat_set_mat(coo_mat2, dmat, ctx);
+ if (T_FALSE == gr_coo_mat_equal(coo_mat, coo_mat2, ctx))
+ {
+ status |= gr_coo_mat_print_nz(coo_mat, ctx); flint_printf("\n");
+ status |= gr_coo_mat_print_nz(coo_mat2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ // flint_printf("Testing coo -> csr -> csr -> lil -> mat -> csr -> coo \n");
+ for (i = 0; i < 2 * n_tests; i++)
+ {
+ status |= gr_coo_mat_randtest(coo_mat, 10, 0, (i % 2) ? T_FALSE : T_TRUE, state, ctx);
+ //flint_printf("\n\ncoo_mat = "); status |= gr_coo_mat_print_nz(coo_mat, ctx);
+ status |= gr_csr_mat_set_coo_mat(csr_mat, coo_mat, ctx);
+ //flint_printf("\n\ncsr_mat = "); status |= gr_csr_mat_print_nz(csr_mat, ctx); flint_printf("\nnnz = %d\n", csr_mat->nnz);
+ status |= gr_csr_mat_set(csr_mat2, csr_mat, ctx);
+ //flint_printf("\n\ncsr_mat = "); status |= gr_csr_mat_print_nz(csr_mat2, ctx); flint_printf("\nnnz = %d\n", csr_mat2->nnz);
+ status |= gr_lil_mat_set_csr_mat(lil_mat, csr_mat2, ctx);
+ //flint_printf("\n\nlil_mat = "); status |= gr_lil_mat_print_nz(lil_mat, ctx);
+ status |= gr_mat_set_lil_mat(dmat, lil_mat, ctx);
+ //flint_printf("\n\nmat = "); status |= gr_mat_print(dmat, ctx);
+ status |= gr_csr_mat_set_mat(csr_mat2, dmat, ctx);
+ //flint_printf("\n\ncsr_mat = "); status |= gr_csr_mat_print_nz(csr_mat2, ctx);
+ status |= gr_coo_mat_set_csr_mat(coo_mat2, csr_mat2, ctx);
+ //flint_printf("\n\ncoo_mat = "); status |= gr_coo_mat_print_nz(coo_mat2, ctx); flint_printf("\nnnz = %d\n", coo_mat2->nnz);
+ if
+ (
+ T_FALSE == gr_csr_mat_equal(csr_mat, csr_mat2, ctx) ||
+ T_FALSE == gr_coo_mat_equal(coo_mat, coo_mat2, ctx)
+ )
+ {
+ status |= gr_coo_mat_print_nz(coo_mat, ctx); flint_printf("\n");
+ status |= gr_csr_mat_print_nz(csr_mat2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ // flint_printf("Testing coo -> lil -> lil -> csr -> mat -> lil -> coo\n");
+ for (i = 0; i < 2 * n_tests; i++)
+ {
+ status |= gr_coo_mat_randtest(coo_mat, 10, 0, (i % 2) ? T_FALSE : T_TRUE, state, ctx);
+ // flint_printf("\n\ncoo_mat = "); status |= gr_coo_mat_print_nz(coo_mat, ctx); flint_printf("\nnnz = %d\n", coo_mat->nnz);
+ status |= gr_lil_mat_set_coo_mat(lil_mat, coo_mat, ctx);
+ // flint_printf("\n\nlil_mat = "); status |= gr_lil_mat_print_nz(lil_mat, ctx); flint_printf("\nnnz = %d\n", lil_mat->nnz);
+ status |= gr_lil_mat_set(lil_mat2, lil_mat, ctx);
+ // flint_printf("\n\nlil_mat = "); status |= gr_lil_mat_print_nz(lil_mat2, ctx); flint_printf("\nnnz = %d\n", lil_mat2->nnz);
+ status |= gr_csr_mat_set_lil_mat(csr_mat, lil_mat2, ctx);
+ // flint_printf("\n\ncsr_mat = "); status |= gr_csr_mat_print_nz(csr_mat, ctx); flint_printf("\nnnz = %d\n", csr_mat->nnz);
+ status |= gr_mat_set_csr_mat(dmat, csr_mat, ctx);
+ // flint_printf("\n\nmat = "); status |= gr_mat_print(dmat, ctx);
+ status |= gr_lil_mat_set_mat(lil_mat2, dmat, ctx);
+ // flint_printf("\n\nlil_mat = "); status |= gr_lil_mat_print_nz(lil_mat2, ctx); flint_printf("\nnnz = %d\n", lil_mat2->nnz);
+ status |= gr_coo_mat_set_lil_mat(coo_mat2, lil_mat2, ctx);
+ // flint_printf("\n\ncoo_mat = "); status |= gr_coo_mat_print_nz(coo_mat2, ctx); flint_printf("\nnnz = %d\n", coo_mat2->nnz);
+ if
+ (
+ T_FALSE == gr_lil_mat_equal(lil_mat, lil_mat2, ctx) ||
+ T_FALSE == gr_coo_mat_equal(coo_mat, coo_mat2, ctx)
+ )
+ {
+ status |= gr_coo_mat_print_nz(coo_mat, ctx); flint_printf("\n");
+ status |= gr_lil_mat_print_nz(lil_mat2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ gr_csr_mat_clear(csr_mat, ctx);
+ gr_csr_mat_clear(csr_mat2, ctx);
+ gr_lil_mat_clear(lil_mat, ctx);
+ gr_lil_mat_clear(lil_mat2, ctx);
+ gr_coo_mat_clear(coo_mat, ctx);
+ gr_coo_mat_clear(coo_mat2, ctx);
+ gr_mat_clear(dmat, ctx);
+ gr_mat_clear(dmat2, ctx);
+ return status;
+}
+
+TEST_FUNCTION_START(gr_sparse_mat_conversion, state)
+{
+ slong i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 16; ++i)
+ {
+ while (1)
+ {
+ //gr_ctx_init_fmpz(ctx);
+ gr_ctx_init_random(ctx, state);
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ //gr_ctx_println(ctx);
+ CHECK_TEST(test_conversion(state, ctx), "Conversion between various sparse representations (and dense)");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
diff --git a/src/gr_sparse_mat/test/t-init.c b/src/gr_sparse_mat/test/t-init.c
new file mode 100644
index 0000000000..2fcadf33fa
--- /dev/null
+++ b/src/gr_sparse_mat/test/t-init.c
@@ -0,0 +1,247 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "test_helpers.h"
+#include "gr_sparse_mat.h"
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int test_init_csr(gr_ctx_t ctx)
+{
+ gr_csr_mat_t mat;
+ gr_csr_mat_init(mat, 5, 6, ctx);
+ if (mat->r != 5 || mat->c != 6 || mat->alloc != 0 || mat->nnz != 0 || mat->rows == NULL || mat->cols != NULL || mat->nzs != NULL)
+ {
+ flint_printf("Failed init CSR test\n");
+ return GR_TEST_FAIL;
+ }
+ gr_csr_mat_clear(mat, ctx);
+ return GR_SUCCESS;
+}
+
+int test_init_lil(gr_ctx_t ctx)
+{
+ gr_lil_mat_t mat;
+ gr_lil_mat_init(mat, 5, 6, ctx);
+ if (mat->r != 5 || mat->c != 6 || mat->nnz != 0 || mat->rows == NULL)
+ {
+ flint_printf("Failed init LIL test\n");
+ return GR_TEST_FAIL;
+ }
+ gr_lil_mat_clear(mat, ctx);
+ return GR_SUCCESS;
+}
+
+int test_init_coo(gr_ctx_t ctx)
+{
+ gr_coo_mat_t mat;
+ gr_coo_mat_init(mat, 5, 6, ctx);
+ if (mat->r != 5 || mat->c != 6 || mat->alloc != 0 || mat->nnz != 0 || mat->rows != NULL || mat->cols != NULL || mat->nzs != NULL || mat->is_canonical != T_TRUE)
+ {
+ flint_printf("Failed init COO test\n");
+ return GR_TEST_FAIL;
+ }
+ gr_coo_mat_clear(mat, ctx);
+ return GR_SUCCESS;
+}
+
+int test_init_from_entries_canonical(flint_rand_t state, gr_ctx_t ctx)
+{
+ // TODO: randomize length and nonzero cols
+ slong i;
+ gr_coo_mat_t mat;
+ gr_ptr entries;
+ int status = GR_SUCCESS;
+ truth_t eq;
+ slong sz = ctx->sizeof_elem;
+ slong r = 5;
+ slong c = 10;
+ slong N = 15;
+    ulong rows[20] = {0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4};
+    ulong cols[20] = {0, 2, 3, 1, 4, 6, 8, 9, 5, 7, 0, 1, 3, 5, 6, 8, 2, 4, 7, 9};
+
+ //flint_printf("Running init test\n");
+ GR_TMP_INIT_VEC(entries, N, ctx);
+ for (i = 0; i < N; ++i)
+ status |= gr_randtest_not_zero(GR_ENTRY(entries, i, sz), state, ctx);
+ if (status != GR_SUCCESS)
+ {
+ flint_printf("Failed to make random numbers!\n");
+ return GR_SUCCESS; // Not my fault!
+ }
+
+ gr_coo_mat_init(mat, r, c, ctx);
+ status |= gr_coo_mat_from_entries(mat, rows, cols, entries, N, T_TRUE, ctx);
+
+ // Check parameters
+ if (status != GR_SUCCESS || mat->r != r || mat->c != c || mat->alloc != 15 || mat->nnz != 15)
+ {
+ flint_printf("Bad params! %ld %ld %ld %ld\n", mat->r, mat->c, mat->alloc, mat->nnz);
+ return GR_TEST_FAIL;
+ }
+
+ // Check indices and entries
+ for (i = 0; i < N; ++i)
+ {
+ if (*gr_coo_mat_row_ptr(mat, i) != rows[i])
+ {
+ flint_printf("Bad row index!\n");
+ return GR_TEST_FAIL;
+ }
+ if (*gr_coo_mat_col_ptr(mat, i) != cols[i])
+ {
+ flint_printf("Bad column index!\n");
+ return GR_TEST_FAIL;
+ }
+ eq = gr_equal(gr_coo_mat_entry_ptr(mat, i, ctx), GR_ENTRY(entries, i, sz), ctx);
+ if (eq == T_FALSE)
+ {
+ flint_printf("Bad elements!\n");
+ return GR_TEST_FAIL;
+ }
+ }
+ gr_coo_mat_clear(mat, ctx);
+ GR_TMP_CLEAR_VEC(entries, N, ctx);
+ return status;
+}
+
+int test_init_from_entries_internal(ulong *rows, ulong *cols, gr_srcptr entries, slong r, slong c, slong num, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong sz = ctx->sizeof_elem;
+ slong i, j, k;
+ gr_coo_mat_t mat;
+ gr_ptr temp, temp2, temp3;
+
+ ///flint_printf("\n\n\nRunning internal test\n");
+ GR_TMP_INIT2(temp, temp2, ctx);
+ gr_coo_mat_init(mat, r, c, ctx);
+ status |= gr_coo_mat_from_entries(mat, rows, cols, entries, num, T_FALSE, ctx);
+ if (status != GR_SUCCESS)
+ return GR_TEST_FAIL;
+
+ gr_coo_mat_canonicalize(mat, ctx);
+ if (mat->is_canonical == T_FALSE)
+ return GR_TEST_FAIL;
+
+ // Check every entry (including the zeroes)
+ for (i = 0; i < r; i++)
+ {
+ for (j = 0; j < c; j++)
+ {
+ // Compute the expected value of the entry
+ status |= gr_zero(temp, ctx);
+ for (k = 0; k < num; k++)
+ if (rows[k] == i && cols[k] == j)
+ status |= gr_add(temp, temp, GR_ENTRY(entries, k, sz), ctx);
+
+ status |= gr_coo_mat_get_entry(temp2, mat, i, j, ctx);
+ temp3 = gr_coo_mat_find_entry(mat, i, j, ctx);
+ if (
+ gr_equal(temp, temp2, ctx) == T_FALSE ||
+ (temp3 == NULL && gr_is_zero(temp, ctx) == T_FALSE) ||
+ (temp3 != NULL && gr_is_zero(temp3, ctx) == T_TRUE) ||
+ (temp3 != NULL && gr_equal(temp, temp3, ctx) == T_FALSE)
+ )
+ {
+ flint_printf("Failed on %d!\n", i);
+ gr_println(temp, ctx);
+ gr_println(temp2, ctx);
+ flint_printf("%p\n", temp3);
+ if (temp3 != NULL)
+ gr_println(temp3, ctx);
+ return GR_TEST_FAIL;
+ }
+ }
+ }
+ GR_TMP_CLEAR2(temp, temp2, ctx);
+ gr_coo_mat_clear(mat, ctx);
+ return status;
+}
+
+int test_init_from_entries(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i, j;
+ int status = GR_SUCCESS;
+ slong r = 5;
+ slong c = 10;
+ slong N = 20;
+ ulong rows[20] = {0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4};
+ ulong cols[20] = {0, 2, 3, 1, 4, 6, 8, 9, 5, 7, 0, 1, 3, 5, 6, 8, 2, 4, 7, 9};
+ gr_ptr entries;
+ GR_TMP_INIT_VEC(entries, N, ctx);
+ status |= _gr_vec_randtest(entries, state, N, ctx);
+
+ // Randomly permute the rows and columns
+ for (i = 0; i < N; ++i)
+ {
+ j = n_randint(state, N);
+ FLINT_SWAP(ulong, rows[i], rows[j]);
+ j = n_randint(state, N);
+ FLINT_SWAP(ulong, cols[i], cols[j]);
+ }
+ status |= test_init_from_entries_internal(rows, cols, entries, r, c, N, ctx);
+
+ GR_TMP_CLEAR_VEC(entries, N, ctx);
+ return status;
+}
+
+int test_init_from_entries_adversarial(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i;
+ int status = GR_SUCCESS;
+ slong sz = ctx->sizeof_elem;
+ slong r = 5;
+ slong c = 10;
+ slong N = 20;
+ ulong rows[20] = {1, 0, 3, 4, 2, 1, 4, 1, 3, 2, 3, 3, 4, 0, 0, 1, 2, 4, 3, 0};
+ ulong cols[20] = {0, 2, 3, 1, 4, 8, 8, 0, 5, 7, 0, 3, 3, 5, 6, 8, 4, 2, 5, 9};
+ slong ents[20] = {-1, 3, 1, 2, 0, 6, 4, 1, 3, 1, -4, -1, 2, 9, 1, -6, 4, 0, -2, 4};
+
+ gr_ptr entries;
+ GR_TMP_INIT_VEC(entries, N, ctx);
+
+ for (i = 0; i < N; i++)
+ status |= gr_set_si(GR_ENTRY(entries, i, sz), ents[i], ctx);
+ status |= test_init_from_entries_internal(rows, cols, entries, r, c, N, ctx);
+
+ GR_TMP_CLEAR_VEC(entries, N, ctx);
+ return status;
+}
+
+TEST_FUNCTION_START(gr_sparse_mat_init, state)
+{
+ int i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 16; ++i)
+ {
+ while(1)
+ {
+ //gr_ctx_init_fmpz(ctx);
+ gr_ctx_init_random(ctx, state);
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ //gr_ctx_println(ctx);
+        CHECK_TEST(test_init_csr(ctx), "Init CSR matrix");
+        CHECK_TEST(test_init_lil(ctx), "Init LIL matrix");
+        CHECK_TEST(test_init_coo(ctx), "Init COO matrix");
+ CHECK_TEST(test_init_from_entries_canonical(state, ctx), "Init from entries in canonical form");
+ CHECK_TEST(test_init_from_entries(state, ctx), "Init from entries in noncanonical order");
+ CHECK_TEST(test_init_from_entries_adversarial(state, ctx), "Init from entries in adversarial order");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
diff --git a/src/gr_sparse_mat/test/t-lu.c b/src/gr_sparse_mat/test/t-lu.c
new file mode 100644
index 0000000000..1aff24855d
--- /dev/null
+++ b/src/gr_sparse_mat/test/t-lu.c
@@ -0,0 +1,164 @@
+/*
+ Copyright (C) 2010 Fredrik Johansson
+ Copyright (C) 2020 Kartik Venkatram
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include "test_helpers.h"
+#include "gr_sparse_mat.h"
+/* #include <sys/time.h> */
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int test_lu(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong rep, r, c, i, j, rk, *P, *Q;
+ gr_ptr val;
+ gr_coo_mat_t Aorig;
+ gr_lil_mat_t A, LU, L, U;
+ gr_mat_t dL, dU, dLU;
+ int status = GR_SUCCESS;
+
+ flint_printf("decomposing PAQ = LU....");
+ fflush(stdout);
+
+ for (rep = 0; rep < 200; rep++)
+ {
+ if (rep % 20 == 0) {flint_printf("."); fflush(stdout);}
+
+ r = n_randint(state, 20);
+ c = n_randint(state, 20);
+
+ P = flint_malloc(r*sizeof(*P));
+        Q = flint_malloc(c*sizeof(*Q));
+
+ gr_coo_mat_init(Aorig, r, c, ctx);
+ gr_lil_mat_init(A, r, c, ctx);
+ gr_lil_mat_init(LU, r, c, ctx);
+ gr_lil_mat_init(L, r, c, ctx);
+ gr_lil_mat_init(U, r, c, ctx);
+
+ status |= gr_coo_mat_randtest(Aorig, FLINT_MIN(r * 3, c * 3), 0, T_TRUE, state, ctx);
+ status |= gr_lil_mat_set_coo_mat(A, Aorig, ctx);
+ status |= gr_lil_mat_lu(&rk, P, Q, L, U, A, ctx);
+
+ /* Check that L is lower triangular (with ones on diagonal up to rank) */
+ for (i = 0; i < r; ++i)
+ {
+ val = gr_sparse_vec_find_entry(&L->rows[i], i, ctx);
+ if (i < rk && (val == NULL || gr_is_one(val, ctx) == T_FALSE))
+ {
+ flint_printf("FAIL: L does not have unit diagonal up to the rank\n");
+ status |= gr_lil_mat_print_nz(L, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ for (j = 0; j < L->rows[i].nnz; ++j)
+ {
+ if (L->rows[i].inds[j] > i)
+ {
+ flint_printf("FAIL: L not lower triangular\n");
+ status |= gr_lil_mat_print_nz(L, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ if (L->rows[i].inds[j] >= rk)
+ {
+ flint_printf("FAIL: L not trivial past the rank\n");
+ status |= gr_lil_mat_print_nz(L, ctx); flint_printf("\n");
+ /*gr_lil_mat_print_pretty(L, ctx);*/
+ return GR_TEST_FAIL;
+ }
+ }
+ }
+ /* Check that U is upper triangular (with nonzero diagonal up to rank) */
+ for (i = 0; i < r; ++i)
+ {
+ val = gr_sparse_vec_find_entry(&U->rows[i], i, ctx);
+ if (i < rk && (val == NULL || gr_is_zero(val, ctx) == T_TRUE))
+ {
+ flint_printf("FAIL: U does not have nonzero diagonal\n");
+ status |= gr_lil_mat_print_nz(U, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ if (i >= rk && U->rows[i].nnz != 0)
+ {
+ flint_printf("FAIL: U not trivial past the rank\n");
+ status |= gr_lil_mat_print_nz(U, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ for (j = 0; j < U->rows[i].nnz; ++j)
+ {
+ if (U->rows[i].inds[j] < i)
+ {
+ flint_printf("FAIL: U not upper triangular\n");
+ status |= gr_lil_mat_print_nz(U, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+ }
+
+ //flint_printf("L = "); status |= gr_lil_mat_print_nz(L, ctx); flint_printf("\n");
+ //flint_printf("U = "); status |= gr_lil_mat_print_nz(U, ctx); flint_printf("\n");
+
+ // Check that PAQ = LU
+ gr_mat_init(dL, r, c, ctx);
+ gr_mat_init(dU, r, c, ctx);
+ gr_mat_init(dLU, r, c, ctx);
+ status |= gr_mat_set_lil_mat(dL, L, ctx);
+ status |= gr_mat_set_lil_mat(dU, U, ctx);
+ status |= gr_mat_mul(dLU, dL, dU, ctx);
+ status |= gr_lil_mat_set_mat(LU, dLU, ctx);
+ status |= gr_lil_mat_permute_rows(A, P, ctx);
+ status |= gr_lil_mat_permute_cols(A, Q, ctx);
+ if (status != GR_SUCCESS || gr_lil_mat_equal(A, LU, ctx) == T_FALSE)
+ {
+ flint_printf("FAIL: PAQ != LU\n");
+ flint_printf("PAQ = "); status |= gr_lil_mat_print_nz(A, ctx); flint_printf("\n");
+ flint_printf("LU = "); status |= gr_lil_mat_print_nz(LU, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+
+ flint_free(P);
+ flint_free(Q);
+ gr_coo_mat_clear(Aorig, ctx);
+ gr_lil_mat_clear(A, ctx);
+ gr_lil_mat_clear(U, ctx);
+ gr_lil_mat_clear(L, ctx);
+ gr_lil_mat_clear(LU, ctx);
+ gr_mat_clear(dL, ctx);
+ gr_mat_clear(dU, ctx);
+ gr_mat_clear(dLU, ctx);
+ }
+
+ flint_printf("PASS\n");
+ return status;
+}
+
+
+TEST_FUNCTION_START(gr_sparse_mat_lu, state)
+{
+ slong i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 1; ++i)
+ {
+ while (1)
+ {
+ //gr_ctx_init_fmpz(ctx);
+ //gr_ctx_init_random(ctx, state);
+ gr_ctx_init_fq_nmod(ctx, 65521, 1, "a");
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE || gr_ctx_is_exact(ctx) != T_TRUE || gr_ctx_is_field(ctx) != T_TRUE || gr_ctx_is_finite(ctx) != T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ CHECK_TEST(test_lu(state, ctx), "Sparse matrix LU decomposition");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
diff --git a/src/gr_sparse_mat/test/t-mul.c b/src/gr_sparse_mat/test/t-mul.c
new file mode 100644
index 0000000000..d41577f3d7
--- /dev/null
+++ b/src/gr_sparse_mat/test/t-mul.c
@@ -0,0 +1,194 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "test_helpers.h"
+#include "gr_sparse_mat.h"
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int test_mul(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i, j;
+ slong M = 5;
+ slong N = 15;
+ slong O = 2;
+ slong n_tests = 20;
+ int status = GR_SUCCESS;
+ truth_t eq;
+ gr_csr_mat_t csr_mat;
+ gr_lil_mat_t lil_mat;
+ gr_coo_mat_t coo_mat;
+ gr_ptr u, v, v2;
+ gr_mat_t dmat, U, V, V2;
+ gr_mat_t UT, VT, VT2;
+
+ gr_coo_mat_init(coo_mat, M, N, ctx);
+ gr_csr_mat_init(csr_mat, M, N, ctx);
+ gr_lil_mat_init(lil_mat, M, N, ctx);
+ gr_mat_init(dmat, M, N, ctx);
+
+ GR_TMP_INIT_VEC(u, N, ctx);
+ gr_mat_init(UT, O, N, ctx);
+ gr_mat_init(U, N, O, ctx);
+
+ GR_TMP_INIT_VEC(v, M, ctx);
+ gr_mat_init(VT, O, M, ctx);
+ gr_mat_init(V, M, O, ctx);
+
+ GR_TMP_INIT_VEC(v2, M, ctx);
+ gr_mat_init(VT2, O, M, ctx);
+ gr_mat_init(V2, M, O, ctx);
+
+ //flint_printf("Testing sparse matrix-vector multiplication\n");
+ for (i = 0; i < 2*n_tests; i++)
+ {
+ j = i % 2; // CSR or LIL mat
+
+ // Get random sparse matrix
+ status |= gr_coo_mat_randtest(coo_mat, 20, 0, T_TRUE, state, ctx);
+ status |= gr_mat_set_coo_mat(dmat, coo_mat, ctx);
+ if (j == 0)
+ status |= gr_csr_mat_set_coo_mat(csr_mat, coo_mat, ctx);
+ else
+ status |= gr_lil_mat_set_coo_mat(lil_mat, coo_mat, ctx);
+
+ // Get a random dense vector
+ status |= _gr_vec_randtest(u, state, N, ctx);
+
+ // Compute matrix multiply using sparse and dense reprs
+ if (j == 0)
+ status |= gr_csr_mat_mul_vec(v, csr_mat, u, ctx);
+ else
+ status |= gr_lil_mat_mul_vec(v, lil_mat, u, ctx);
+ status |= gr_mat_mul_vec(v2, dmat, u, ctx);
+
+ eq = _gr_vec_equal(v, v2, M, ctx);
+ if (eq == T_FALSE || status != GR_SUCCESS)
+ {
+ gr_ctx_println(ctx);
+ if (j == 0)
+ {
+ status |= flint_printf("mat = "); status |= gr_csr_mat_print_nz(csr_mat, ctx); flint_printf("\n");
+ }
+ else
+ {
+ status |= flint_printf("mat = "); status |= gr_lil_mat_print_nz(lil_mat, ctx); flint_printf("\n");
+ }
+ flint_printf("u = "); status |= _gr_vec_print(u, N, ctx); flint_printf("\n");
+ flint_printf("v = "); status |= _gr_vec_print(v, M, ctx); flint_printf("\n");
+ flint_printf("v2 = "); status |= _gr_vec_print(v2, M, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+
+ // Get a random dense column matrix
+ status |= gr_mat_randtest(UT, state, ctx);
+
+ // Compute matrix multiply using sparse and dense reprs
+ if (j == 0)
+ status |= gr_csr_mat_mul_mat_transpose(VT, csr_mat, UT, ctx);
+ else
+ status |= gr_lil_mat_mul_mat_transpose(VT, lil_mat, UT, ctx);
+ status |= gr_mat_transpose(U, UT, ctx);
+ status |= gr_mat_mul(V2, dmat, U, ctx);
+ status |= gr_mat_transpose(VT2, V2, ctx);
+
+ eq = gr_mat_equal(VT, VT2, ctx);
+ if (eq == T_FALSE || status != GR_SUCCESS)
+ {
+ gr_ctx_println(ctx);
+ flint_printf("j = %d, eq = %d, status = %d\n", j, eq, status);
+ if (j == 0)
+ {
+ status |= flint_printf("mat = "); status |= gr_csr_mat_print_nz(csr_mat, ctx); flint_printf("\n");
+ }
+ else
+ {
+ status |= flint_printf("mat = "); status |= gr_lil_mat_print_nz(lil_mat, ctx); flint_printf("\n");
+ }
+ flint_printf("UT = "); status |= gr_mat_print(UT, ctx); flint_printf("\n");
+ flint_printf("VT = "); status |= gr_mat_print(VT, ctx); flint_printf("\n");
+ flint_printf("VT2 = "); status |= gr_mat_print(VT2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+
+ // Get a random dense row matrix
+ status |= gr_mat_randtest(U, state, ctx);
+
+ // Compute matrix multiply using sparse and dense reprs
+ if (j == 0)
+ status |= gr_csr_mat_mul_mat(V, csr_mat, U, ctx);
+ else
+ status |= gr_lil_mat_mul_mat(V, lil_mat, U, ctx);
+
+ //flint_printf("\nBefore matmul U = "); gr_mat_print(U, ctx); flint_printf("\n");
+ status |= gr_mat_mul(V2, dmat, U, ctx);
+ //flint_printf("\nAfter matmul U = "); gr_mat_print(U, ctx); flint_printf("\n");
+
+ eq = gr_mat_equal(V, V2, ctx);
+ if (eq == T_FALSE || status != GR_SUCCESS)
+ {
+ gr_ctx_println(ctx);
+ flint_printf("j = %d, eq = %d, status = %d\n", j, eq, status);
+ if (j == 0)
+ {
+ status |= flint_printf("mat = "); status |= gr_csr_mat_print_nz(csr_mat, ctx); flint_printf("\n");
+ }
+ else
+ {
+ status |= flint_printf("mat = "); status |= gr_lil_mat_print_nz(lil_mat, ctx); flint_printf("\n");
+ }
+ flint_printf("U = "); status |= gr_mat_print(U, ctx); flint_printf("\n");
+ flint_printf("V = "); status |= gr_mat_print(V, ctx); flint_printf("\n");
+ flint_printf("V2 = "); status |= gr_mat_print(V2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ gr_mat_clear(U, ctx);
+ gr_mat_clear(UT, ctx);
+ GR_TMP_CLEAR_VEC(u, N, ctx);
+
+ gr_mat_clear(V, ctx);
+ gr_mat_clear(VT, ctx);
+ GR_TMP_CLEAR_VEC(v, M, ctx);
+
+ gr_mat_clear(VT2, ctx);
+ gr_mat_clear(V2, ctx);
+ GR_TMP_CLEAR_VEC(v2, M, ctx);
+
+ gr_mat_clear(dmat, ctx);
+ gr_csr_mat_clear(csr_mat, ctx);
+ gr_lil_mat_clear(lil_mat, ctx);
+ gr_coo_mat_clear(coo_mat, ctx);
+ return status;
+}
+
+TEST_FUNCTION_START(gr_sparse_mat_mul, state)
+{
+ slong i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 16; ++i)
+ {
+ while (1)
+ {
+ //gr_ctx_init_fmpz(ctx);
+ gr_ctx_init_random(ctx, state);
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ //gr_ctx_println(ctx);
+ CHECK_TEST(test_mul(state, ctx), "Sparse matrix-vector and sparse matrix-matrix products");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
diff --git a/src/gr_sparse_mat/test/t-randtest.c b/src/gr_sparse_mat/test/t-randtest.c
new file mode 100644
index 0000000000..026fdf78af
--- /dev/null
+++ b/src/gr_sparse_mat/test/t-randtest.c
@@ -0,0 +1,79 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "test_helpers.h"
+#include "gr_sparse_mat.h"
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int test_randtest(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i;
+ slong M = 16;
+ slong N = 64;
+ slong n_tests = 10;
+ int status = GR_SUCCESS;
+ gr_coo_mat_t mat;
+ gr_coo_mat_init(mat, M, N, ctx);
+
+ //flint_printf("Testing w/o replacement\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_coo_mat_randtest(mat, 128, 0, T_TRUE, state, ctx);
+ if (gr_coo_mat_is_canonical(mat, ctx) == T_FALSE || mat->nnz != 128)
+ return GR_TEST_FAIL;
+ }
+
+ //flint_printf("Testing w/ replacement\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_coo_mat_randtest(mat, 32, 1, T_TRUE, state, ctx);
+ if (gr_coo_mat_is_canonical(mat, ctx) == T_FALSE || mat->nnz > 32 || mat->nnz < 24)
+ return GR_TEST_FAIL;
+ }
+
+ //flint_printf("Testing w/ prob\n");
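+    /*
+     * Sanity bounds (added for exposition): with 16 * 64 = 1024 positions
+     * and inclusion probability 0.125, the expected nonzero count is 128;
+     * the accepted window [64, 192] is several standard deviations wide,
+     * so spurious failures should be vanishingly rare.
+     */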
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_coo_mat_randtest_prob(mat, 0.125, state, ctx);
+ if (gr_coo_mat_is_canonical(mat, ctx) == T_FALSE || mat->nnz > 192 || mat->nnz < 64)
+ {
+            status |= gr_coo_mat_print_nz(mat, ctx); flint_printf("%wd\n", mat->nnz);
+ return GR_TEST_FAIL;
+ }
+
+ }
+ gr_coo_mat_clear(mat, ctx);
+ return status;
+}
+
+TEST_FUNCTION_START(gr_sparse_mat_randtest, state)
+{
+ int i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 16; ++i)
+ {
+ while(1)
+ {
+ //gr_ctx_init_nmod(ctx, 2147483647);
+ gr_ctx_init_random(ctx, state);
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ //gr_ctx_print(ctx); flint_printf("\n");
+
+ CHECK_TEST(test_randtest(state, ctx), "Test random sparse matrix generation");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
diff --git a/src/gr_sparse_mat/test/t-rref.c b/src/gr_sparse_mat/test/t-rref.c
new file mode 100644
index 0000000000..9cecebb887
--- /dev/null
+++ b/src/gr_sparse_mat/test/t-rref.c
@@ -0,0 +1,100 @@
+/*
+ Copyright (C) 2010 Fredrik Johansson
+ Copyright (C) 2020 Kartik Venkatram
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include "test_helpers.h"
+#include "gr_sparse_mat.h"
+/* #include <sys/time.h> */
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int test_rref(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong rep, r, c, sparse_rank, dense_rank;
+ gr_coo_mat_t Aorig;
+ gr_lil_mat_t A, B, R;
+ gr_mat_t dA, dR;
+ int status = GR_SUCCESS;
+
+ flint_printf("converting A to reduced row echelon form....");
+ fflush(stdout);
+
+ for (rep = 0; rep < 200; rep++)
+ {
+ if (rep % 20 == 0) {flint_printf("."); fflush(stdout);}
+
+ r = n_randint(state, 20);
+ c = n_randint(state, 20);
+ gr_coo_mat_init(Aorig, r, c, ctx);
+ gr_lil_mat_init(A, r, c, ctx);
+ gr_lil_mat_init(B, r, c, ctx);
+ gr_lil_mat_init(R, r, c, ctx);
+ gr_mat_init(dA, r, c, ctx);
+ gr_mat_init(dR, r, c, ctx);
+
+ status |= gr_coo_mat_randtest(Aorig, FLINT_MIN(r * 3, c * 3), 0, T_TRUE, state, ctx);
+ if (status != GR_SUCCESS)
+ {
+ flint_printf("Some failure!\n");
+ return GR_TEST_FAIL;
+ }
+ status |= gr_lil_mat_set_coo_mat(A, Aorig, ctx);
+ status |= gr_mat_set_lil_mat(dA, A, ctx);
+
+ status |= gr_lil_mat_rref(&sparse_rank, R, A, ctx);
+ status |= gr_mat_rref(&dense_rank, dR, dA, ctx);
+ status |= gr_lil_mat_set_mat(B, dR, ctx);
+
+ if (status != GR_SUCCESS || gr_lil_mat_equal(R, B, ctx) == T_FALSE)
+ {
+ flint_printf("FAIL!\n");
+ flint_printf("A = "); status |= gr_lil_mat_print_nz(A, ctx); flint_printf("\n");
+ flint_printf("R = "); status |= gr_lil_mat_print_nz(R, ctx); flint_printf("\n");
+ flint_printf("dR = "); status |= gr_lil_mat_print_nz(B, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+
+ gr_coo_mat_clear(Aorig, ctx);
+ gr_lil_mat_clear(A, ctx);
+ gr_lil_mat_clear(B, ctx);
+ gr_lil_mat_clear(R, ctx);
+ gr_mat_clear(dA, ctx);
+ gr_mat_clear(dR, ctx);
+ }
+
+ flint_printf("PASS\n");
+ return status;
+}
+
+
+TEST_FUNCTION_START(gr_sparse_mat_rref, state)
+{
+ slong i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 1; ++i)
+ {
+ while (1)
+ {
+ //gr_ctx_init_fmpz(ctx);
+ //gr_ctx_init_random(ctx, state);
+ gr_ctx_init_fq_nmod(ctx, 65521, 1, "a");
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE || gr_ctx_is_exact(ctx) != T_TRUE || gr_ctx_is_field(ctx) != T_TRUE || gr_ctx_is_finite(ctx) != T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ CHECK_TEST(test_rref(state, ctx), "Sparse matrix reduced row echelon form");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
+
diff --git a/src/gr_sparse_mat/test/t-solve.c b/src/gr_sparse_mat/test/t-solve.c
new file mode 100644
index 0000000000..88adcd6304
--- /dev/null
+++ b/src/gr_sparse_mat/test/t-solve.c
@@ -0,0 +1,165 @@
+/*
+ Copyright (C) 2010 Fredrik Johansson
+ Copyright (C) 2020 Kartik Venkatram
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdio.h>
+#include "test_helpers.h"
+#include "gr_sparse_mat.h"
+/* #include <sys/time.h> */
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int test_solve(flint_rand_t state, gr_ctx_t ctx)
+{
+ int iter, ret;
+ slong rep, nreps = 100, r, c, i;
+ gr_coo_mat_t Aorig;
+ gr_lil_mat_t A, At;
+ gr_ptr x, x2, b, Atb, Ax, AtAx;
+ gr_mat_t dmat;
+ slong niters[6] = {0, 0, 0, 0, 0, 0};
+ slong psol[6] = {0, 0, 0, 0, 0, 0};
+ slong nosol[6] = {0, 0, 0, 0, 0, 0};
+ slong sol[6] = {0, 0, 0, 0, 0, 0};
+ /* double elapsed[6] = {0, 0, 0, 0, 0}; */
+ char *names[6] = {"rref", "lu", "Lanczos", "block Lanczos", "Wiedemann", "block Wiedemann"};
+ int status = GR_SUCCESS;
+ /* struct timeval start, end; */
+
+ flint_printf("solving Ax = b....");
+ fflush(stdout);
+
+ for (rep = 0; rep < nreps; rep++)
+ {
+ if (rep % 5==0) {flint_printf("."); fflush(stdout);}
+
+ c = r = 50 + n_randint(state, 5);
+
+ gr_coo_mat_init(Aorig, r, c, ctx);
+ gr_lil_mat_init(A, r, c, ctx);
+ gr_mat_init(dmat, r, c, ctx);
+ GR_TMP_INIT_VEC(x, c, ctx);
+ GR_TMP_INIT_VEC(x2, c, ctx);
+ GR_TMP_INIT_VEC(b, r, ctx);
+ GR_TMP_INIT_VEC(Ax, r, ctx);
+
+ // Set up a solvable problem
+ status |= gr_coo_mat_randtest_prob(Aorig, .2, state, ctx);
+ status |= gr_lil_mat_set_coo_mat(A, Aorig, ctx);
+ status |= _gr_vec_randtest(x, state, c, ctx);
+ status |= gr_lil_mat_mul_vec(b, A, x, ctx);
+ status |= gr_mat_set_lil_mat(dmat, A, ctx);
+ // flint_printf("A = "); gr_mat_print(dmat, ctx); flint_printf("\n");
+ // flint_printf("x = "); _gr_vec_print(x, c, ctx); flint_printf("\n");
+ // flint_printf("b = "); _gr_vec_print(b, r, ctx); flint_printf("\n");
+
+ for (i = 0; i < 6; ++i)
+ {
+ iter = 0;
+ /* gettimeofday(&start, NULL); */
+ // TODO: rref and lu solving
+ if (i == 0 || i == 1)
+ continue;
+ switch (i)
+ {
+ //case 0: ret = gr_lil_mat_solve_rref(x2, A, b, ctx); break;
+ //case 1: ret = gr_lil_mat_solve_lu(x2, A, b, ctx); break;
+ case 2: do ret = gr_lil_mat_solve_lanczos(x2, A, b, state, ctx); while (ret == GR_UNABLE && ++iter < 3); break;
+ case 3: do ret = gr_lil_mat_solve_block_lanczos(x2, A, b, 8, state, ctx); while (ret == GR_UNABLE && ++iter < 3); break;
+ case 4: ret = gr_lil_mat_solve_wiedemann(x2, A, b, ctx); break;
+            case 5: do ret = gr_lil_mat_solve_block_wiedemann(x2, A, b, 8, state, ctx); while (ret == GR_UNABLE && ++iter < 3); break;
+ }
+ // /* gettimeofday(&end, NULL);
+ // elapsed[i] += (end.tv_sec - start.tv_sec) + .000001*(end.tv_usec-start.tv_usec); */
+ if (ret == GR_UNABLE) nosol[i] += 1;
+ else
+ {
+ niters[i] += iter;
+ status |= gr_lil_mat_mul_vec(Ax, A, x2, ctx);
+ if (_gr_vec_equal(b, Ax, A->r, ctx) == T_FALSE)
+ {
+ if (i == 2 || i == 3)
+ {
+ gr_lil_mat_init(At, c, r, ctx);
+ GR_TMP_INIT_VEC(AtAx, c, ctx);
+ GR_TMP_INIT_VEC(Atb, c, ctx);
+ status |= gr_lil_mat_transpose(At, A, ctx);
+ status |= gr_lil_mat_mul_vec(AtAx, At, Ax, ctx);
+ status |= gr_lil_mat_mul_vec(Atb, At, b, ctx);
+ if (_gr_vec_equal(AtAx, Atb, A->c, ctx) != T_TRUE)
+ {
+ flint_printf("FAIL on %s: AtAx != Atb\n", names[i]);
+                                    flint_abort();
+ }
+ else psol[i] += 1;
+ GR_TMP_CLEAR_VEC(AtAx, c, ctx);
+ GR_TMP_CLEAR_VEC(Atb, c, ctx);
+ gr_lil_mat_clear(At, ctx);
+ }
+ else
+ {
+ flint_printf("FAIL on %s: Ax != b\n", names[i]);
+ return GR_TEST_FAIL;
+ }
+ }
+ else
+ {
+ sol[i] += 1;
+ }
+ }
+ }
+
+ GR_TMP_CLEAR_VEC(x, c, ctx);
+ GR_TMP_CLEAR_VEC(x2, c, ctx);
+ GR_TMP_CLEAR_VEC(b, r, ctx);
+ GR_TMP_CLEAR_VEC(Ax, r, ctx);
+        gr_coo_mat_clear(Aorig, ctx);
+        gr_lil_mat_clear(A, ctx);
+        gr_mat_clear(dmat, ctx);
+ }
+
+ flint_printf("PASS\n");
+ for (i = 0; i < 6; ++i)
+ {
+ flint_printf("Solved %d with %s\n", sol[i], names[i]);
+ /* flint_printf("\tAverage time: %lf\n", elapsed[i]/nreps); */
+ if (nosol[i])
+ flint_printf("\tFound no solution for %wd/%wd examples\n", nosol[i], nreps);
+ if (psol[i])
+ flint_printf("\tFound pseudo-solution for %wd/%wd examples\n", psol[i], nreps);
+ if (niters[i])
+ flint_printf("\tRequired %f extra iters per solution (on average).\n", (double)niters[i]/nreps);
+ }
+ return status;
+}
+
+
+TEST_FUNCTION_START(gr_sparse_mat_solve, state)
+{
+ slong i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 1; ++i)
+ {
+ while (1)
+ {
+ //gr_ctx_init_fmpz(ctx);
+            //gr_ctx_init_random(ctx, state);
+ gr_ctx_init_fq_nmod(ctx, 65521, 1, "a");
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE || gr_ctx_is_exact(ctx) != T_TRUE || gr_ctx_is_field(ctx) != T_TRUE || gr_ctx_is_finite(ctx) != T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ CHECK_TEST(test_solve(state, ctx), "Sparse matrix solving");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
+
diff --git a/src/gr_sparse_mat/to_dense.c b/src/gr_sparse_mat/to_dense.c
new file mode 100644
index 0000000000..b202c88038
--- /dev/null
+++ b/src/gr_sparse_mat/to_dense.c
@@ -0,0 +1,75 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_sparse_mat.h"
+
+int gr_mat_set_csr_mat(gr_mat_t dst, const gr_csr_mat_t src, gr_ctx_t ctx)
+{
+ ulong row, nz;
+ int status = GR_SUCCESS;
+ size_t sz = ctx->sizeof_elem;
+
+ if (dst->r != src->r || dst->c != src->c)
+ return GR_DOMAIN;
+
+ status |= gr_mat_zero(dst, ctx);
+ for (row = 0; row < src->r; ++row)
+ {
+ for (nz = src->rows[row]; nz < src->rows[row + 1]; ++nz)
+ {
+ status |= gr_set(
+ GR_MAT_ENTRY(dst, row, src->cols[nz], sz),
+ GR_ENTRY(src->nzs, nz, sz),
+ ctx
+ );
+ }
+ }
+ return status;
+}
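+
+/*
+ * Worked example (added for exposition): the 2 x 3 matrix
+ *
+ *     [ 5 0 7 ]
+ *     [ 0 0 2 ]
+ *
+ * is stored in CSR form as rows = {0, 2, 3}, cols = {0, 2, 2},
+ * nzs = {5, 7, 2}; row i occupies nzs[rows[i] .. rows[i+1] - 1], which is
+ * exactly the range the loop above scatters into the dense matrix.
+ */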
+
+int gr_mat_set_lil_mat(gr_mat_t dst, const gr_lil_mat_t src, gr_ctx_t ctx)
+{
+ ulong row;
+ int status = GR_SUCCESS;
+
+ if (dst->r != src->r || dst->c != src->c)
+ return GR_DOMAIN;
+
+ for (row = 0; row < src->r; ++row)
+ {
+ status |= gr_vec_set_sparse_vec(dst->rows[row], &src->rows[row], ctx);
+ }
+ return status;
+}
+
+int gr_mat_set_coo_mat(gr_mat_t dst, const gr_coo_mat_t src, gr_ctx_t ctx)
+{
+ ulong nz;
+ int status = GR_SUCCESS;
+ gr_ptr dst_entry;
+ gr_srcptr src_entry;
+ size_t sz = ctx->sizeof_elem;
+
+ if (dst->r != src->r || dst->c != src->c)
+ return GR_DOMAIN;
+
+ status |= gr_mat_zero(dst, ctx);
+ for (nz = 0; nz < src->nnz; ++nz)
+ {
+ dst_entry = GR_MAT_ENTRY(dst, src->rows[nz], src->cols[nz], sz);
+ src_entry = GR_ENTRY(src->nzs, nz, sz);
+ if (src->is_canonical == T_TRUE)
+ status |= gr_set(dst_entry, src_entry, ctx);
+ else
+ status |= gr_add(dst_entry, dst_entry, src_entry, ctx);
+ }
+ return status;
+}
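+
+/*
+ * Note (added for exposition): when src is not canonical, repeated
+ * (row, col) pairs are accumulated with gr_add, so an entry list such as
+ * {(0,1): 2, (0,1): 3} densifies to 5 at position (0,1), matching the
+ * semantics of gr_coo_mat_canonicalize.
+ */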
diff --git a/src/gr_sparse_mat/transpose.c b/src/gr_sparse_mat/transpose.c
new file mode 100644
index 0000000000..2b92ead8f2
--- /dev/null
+++ b/src/gr_sparse_mat/transpose.c
@@ -0,0 +1,67 @@
+/*
+ Copyright (C) 2011 Fredrik Johansson
+ Copyright (C) 2020 Kartik Venkatram
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdlib.h>
+#include <string.h>
+#include "gr_sparse_mat.h"
+
+int gr_lil_mat_transpose(gr_lil_mat_t B, const gr_lil_mat_t A, gr_ctx_t ctx)
+{
+ slong r, c, i, j, nz_idx, sz;
+ gr_sparse_vec_struct *Arow, *Brow;
+ int status = GR_SUCCESS;
+
+ sz = ctx->sizeof_elem;
+ r = gr_sparse_mat_nrows(A, ctx);
+ c = gr_sparse_mat_ncols(A, ctx);
+
+ if (r != gr_sparse_mat_ncols(B, ctx) || c != gr_sparse_mat_nrows(B, ctx))
+ return GR_DOMAIN;
+
+    // TODO: handle aliasing; for now an in-place transpose (A == B) is rejected
+ if (A == B)
+ return GR_DOMAIN;
+
+ /* Get number of nnzs in each column of A (thus each row of B) */
+ for (j = 0; j < c; ++j)
+ {
+ B->rows[j].nnz = 0;
+ }
+ for (i = 0; i < A->r; ++i)
+ {
+ Arow = &A->rows[i];
+ for (nz_idx = 0; nz_idx < A->rows[i].nnz; ++nz_idx)
+ {
+ B->rows[Arow->inds[nz_idx]].nnz += 1;
+ }
+ }
+ /* Allocate space for nnz and reset counters */
+ for (j = 0; j < c; ++j)
+ {
+ Brow = &B->rows[j];
+ gr_sparse_vec_fit_nnz(Brow, Brow->nnz, ctx);
+ Brow->nnz = 0;
+ }
+ /* Put entries into transposed matrix */
+ for (i = 0; i < r; ++i)
+ {
+ Arow = &A->rows[i];
+ for (nz_idx = 0; nz_idx < Arow->nnz; ++nz_idx)
+ {
+ Brow = &B->rows[Arow->inds[nz_idx]];
+ Brow->inds[Brow->nnz] = i;
+ status |= gr_set(GR_ENTRY(Brow->nzs, Brow->nnz, sz), GR_ENTRY(Arow->nzs, nz_idx, sz), ctx);
+ (Brow->nnz)++;
+ }
+ }
+ return status;
+}
diff --git a/src/gr_sparse_mat/write.c b/src/gr_sparse_mat/write.c
new file mode 100644
index 0000000000..ace2499600
--- /dev/null
+++ b/src/gr_sparse_mat/write.c
@@ -0,0 +1,86 @@
+#include <stdio.h>
+#include "gr_sparse_mat.h"
+
+int
+gr_csr_mat_write_nz(gr_stream_t out, const gr_csr_mat_t mat, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong row;
+ gr_sparse_vec_t tmp;
+ gr_stream_write(out, "[");
+ for (row = 0; row < mat->r; row++)
+ {
+ gr_stream_write(out, "\n\t");
+ _gr_csr_mat_borrow_row(tmp, mat, row, ctx);
+ status |= gr_sparse_vec_write_nz(out, tmp, ctx);
+ if (row < mat->r - 1)
+ gr_stream_write(out, ", ");
+ }
+ gr_stream_write(out, "\n]");
+ return status;
+}
+
+int
+gr_lil_mat_write_nz(gr_stream_t out, const gr_lil_mat_t mat, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong row;
+
+ gr_stream_write(out, "[");
+ for (row = 0; row < mat->r; row++)
+ {
+ gr_stream_write(out, "\n\t");
+ status |= gr_sparse_vec_write_nz(out, &mat->rows[row], ctx);
+ if (row < mat->r - 1)
+ gr_stream_write(out, ", ");
+ }
+ gr_stream_write(out, "\n]");
+ return status;
+}
+
+int
+gr_coo_mat_write_nz(gr_stream_t out, const gr_coo_mat_t mat, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong nz;
+ slong sz = ctx->sizeof_elem;
+
+ gr_stream_write(out, "[");
+ for (nz = 0; nz < mat->nnz; nz++)
+ {
+ gr_stream_write(out, "\n\t");
+ gr_stream_write(out, "(");
+ gr_stream_write_si(out, mat->rows[nz]);
+ gr_stream_write(out, ", ");
+ gr_stream_write_si(out, mat->cols[nz]);
+ gr_stream_write(out, "): ");
+ status |= gr_write(out, GR_ENTRY(mat->nzs, nz, sz), ctx);
+ if (nz < mat->nnz - 1)
+ gr_stream_write(out, ", ");
+ }
+ gr_stream_write(out, "\n]");
+ return status;
+}
+
+int gr_csr_mat_print_nz(const gr_csr_mat_t mat, gr_ctx_t ctx)
+{
+ gr_stream_t out;
+ gr_stream_init_file(out, stdout);
+ return gr_csr_mat_write_nz(out, mat, ctx);
+}
+
+int gr_lil_mat_print_nz(const gr_lil_mat_t mat, gr_ctx_t ctx)
+{
+ gr_stream_t out;
+ gr_stream_init_file(out, stdout);
+ return gr_lil_mat_write_nz(out, mat, ctx);
+}
+
+int gr_coo_mat_print_nz(const gr_coo_mat_t mat, gr_ctx_t ctx)
+{
+ gr_stream_t out;
+ gr_stream_init_file(out, stdout);
+ return gr_coo_mat_write_nz(out, mat, ctx);
+}
+
+
diff --git a/src/gr_sparse_vec.h b/src/gr_sparse_vec.h
new file mode 100644
index 0000000000..273779f011
--- /dev/null
+++ b/src/gr_sparse_vec.h
@@ -0,0 +1,286 @@
+/*
+ Copyright (C) 2023 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#ifndef GR_SPARSE_VEC_H
+#define GR_SPARSE_VEC_H
+
+#ifdef GR_SPARSE_VEC_INLINES_C
+#define GR_SPARSE_VEC_INLINE
+#else
+#define GR_SPARSE_VEC_INLINE static inline
+#endif
+
+#include <string.h>
+#include "gr.h"
+#include "gr_vec.h"
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+ * Types and basic access
+**/
+typedef struct
+{
+ slong length;
+ slong nnz;
+ slong alloc;
+ ulong *inds;
+ gr_ptr nzs;
+}
+gr_sparse_vec_struct;
+
+typedef gr_sparse_vec_struct gr_sparse_vec_t[1];
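+/*
+ * Example: the length-5 vector (0, 0, 3, 0, 7) is stored as
+ *     length = 5, nnz = 2, inds = {2, 4}, nzs = {3, 7},
+ * with inds strictly increasing and every stored value nonzero
+ * (see gr_sparse_vec_is_valid below).
+ */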
+
+GR_SPARSE_VEC_INLINE void
+gr_sparse_vec_init(gr_sparse_vec_t vec, slong len, gr_ctx_t ctx)
+{
+ memset(vec, 0, sizeof(gr_sparse_vec_t));
+ vec->length = len;
+}
+
+GR_SPARSE_VEC_INLINE void
+gr_sparse_vec_clear(gr_sparse_vec_t vec, gr_ctx_t ctx)
+{
+ _gr_vec_clear(vec->nzs, vec->alloc, ctx);
+ flint_free(vec->inds);
+ flint_free(vec->nzs);
+ memset(vec, 0, sizeof(gr_sparse_vec_t));
+}
+
+GR_SPARSE_VEC_INLINE slong
+gr_sparse_vec_length(const gr_sparse_vec_t vec)
+{ return vec->length; }
+
+void gr_sparse_vec_set_length(gr_sparse_vec_t vec, slong len, gr_ctx_t ctx);
+
+GR_SPARSE_VEC_INLINE slong
+gr_sparse_vec_nnz(const gr_sparse_vec_t vec)
+{ return vec->nnz; }
+
+void gr_sparse_vec_fit_nnz(gr_sparse_vec_t vec, slong nnz, gr_ctx_t ctx);
+void gr_sparse_vec_shrink_to_nnz(gr_sparse_vec_t vec, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_sparse_vec_from_entries(gr_sparse_vec_t vec, ulong * inds, gr_srcptr entries, slong nnz, truth_t is_canonical, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_sparse_vec_randtest(gr_sparse_vec_t vec, slong nnz, int replacement, flint_rand_t state, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int
+gr_sparse_vec_randtest_prob(gr_sparse_vec_t vec, double prob, flint_rand_t state, gr_ctx_t ctx);
+
+GR_SPARSE_VEC_INLINE WARN_UNUSED_RESULT int
+gr_sparse_vec_is_valid(gr_sparse_vec_t vec, gr_ctx_t ctx)
+{
+ slong i, sz = ctx->sizeof_elem;
+
+ // Check that parameters are valid
+ if (vec->nnz > vec->alloc || vec->alloc > vec->length)
+ return 0;
+
+ // Check that entries are valid
+ for (i = 0; i < vec->nnz; ++i)
+ {
+ if (vec->inds[i] >= vec->length || (i > 0 && vec->inds[i] <= vec->inds[i-1]))
+ return 0;
+ if (gr_is_zero(GR_ENTRY(vec->nzs, i, sz), ctx) == T_TRUE)
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Getting, setting, and conversion
+**/
+
+#define GR_SPARSE_VEC_IND(vec, nz_idx) (vec)->inds[nz_idx]
+#define GR_SPARSE_VEC_ENTRY(vec, nz_idx, sz) GR_ENTRY((vec)->nzs, nz_idx, sz)
+
+GR_SPARSE_VEC_INLINE ulong *
+gr_sparse_vec_ind_ptr(gr_sparse_vec_t vec, slong nz_idx, gr_ctx_t ctx)
+{
+ if (nz_idx < 0 || nz_idx >= vec->nnz)
+ return NULL;
+ return vec->inds + nz_idx;
+}
+
+GR_SPARSE_VEC_INLINE const ulong *
+gr_sparse_vec_ind_srcptr(const gr_sparse_vec_t vec, slong nz_idx, gr_ctx_t ctx)
+{
+ if (nz_idx < 0 || nz_idx >= vec->nnz)
+ return NULL;
+ return vec->inds + nz_idx;
+}
+
+GR_SPARSE_VEC_INLINE gr_ptr
+gr_sparse_vec_entry_ptr(gr_sparse_vec_t vec, slong nz_idx, gr_ctx_t ctx)
+{
+ if (nz_idx < 0 || nz_idx >= vec->nnz)
+ return NULL;
+ return GR_SPARSE_VEC_ENTRY(vec, nz_idx, ctx->sizeof_elem);
+}
+
+GR_SPARSE_VEC_INLINE gr_srcptr
+gr_sparse_vec_entry_srcptr(const gr_sparse_vec_t vec, slong nz_idx, gr_ctx_t ctx)
+{
+ if (nz_idx < 0 || nz_idx >= vec->nnz)
+ return NULL;
+ return GR_SPARSE_VEC_ENTRY(vec, nz_idx, ctx->sizeof_elem);
+}
+
+WARN_UNUSED_RESULT gr_ptr gr_sparse_vec_find_entry(gr_sparse_vec_t vec, slong ind, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_vec_get_entry(gr_ptr dst, gr_sparse_vec_t vec, slong ind, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_vec_set_entry(gr_sparse_vec_t vec, slong ind, gr_srcptr entry, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_vec_set(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_slice(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong ind_start, slong ind_end, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_vec_set_vec(gr_sparse_vec_t dst, gr_srcptr src, slong len, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_vec_set_sparse_vec(gr_ptr dst, gr_sparse_vec_t src, gr_ctx_t ctx);
+
+GR_SPARSE_VEC_INLINE void gr_sparse_vec_swap(gr_sparse_vec_t vec1, gr_sparse_vec_t vec2, gr_ctx_t ctx)
+{
+ FLINT_SWAP(slong, vec1->alloc, vec2->alloc);
+ FLINT_SWAP(gr_ptr, vec1->nzs, vec2->nzs);
+ FLINT_SWAP(ulong *, vec1->inds, vec2->inds);
+ FLINT_SWAP(slong, vec1->length, vec2->length);
+ FLINT_SWAP(slong, vec1->nnz, vec2->nnz);
+}
+
+GR_SPARSE_VEC_INLINE
+void gr_sparse_vec_zero(gr_sparse_vec_t vec, gr_ctx_t ctx)
+{ vec->nnz = 0; }
+
+GR_SPARSE_VEC_INLINE
+int gr_sparse_vec_one(gr_sparse_vec_t vec, slong ind, gr_ctx_t ctx)
+{
+    if (ind < 0 || ind >= vec->length)
+        return GR_DOMAIN;
+
+ gr_sparse_vec_fit_nnz(vec, 1, ctx);
+ vec->inds[0] = ind;
+ vec->nnz = 1;
+ return gr_one(vec->nzs, ctx);
+}
+
+WARN_UNUSED_RESULT int gr_sparse_vec_permute_inds(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong * p, gr_ctx_t ctx);
+
+/**
+ * Comparison
+**/
+
+truth_t gr_sparse_vec_equal(const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx);
+GR_SPARSE_VEC_INLINE truth_t gr_sparse_vec_is_zero(const gr_sparse_vec_t vec, gr_ctx_t ctx) { return _gr_vec_is_zero(vec->nzs, vec->nnz, ctx); }
+
+/**
+ * Output
+**/
+
+int gr_sparse_vec_write_nz(gr_stream_t out, const gr_sparse_vec_t vec, gr_ctx_t ctx);
+int gr_sparse_vec_print_nz(const gr_sparse_vec_t vec, gr_ctx_t ctx);
+
+/**
+ * Arithmetic
+**/
+
+// Internal function to count the union of indices in inds0 and inds1
+slong _gr_sparse_vec_count_unique_inds(const ulong *inds0, slong nnz0, const ulong *inds1, slong nnz1);
+
+WARN_UNUSED_RESULT int gr_sparse_vec_neg(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_vec_update(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_vec_add(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_sub(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx) ;
+WARN_UNUSED_RESULT int gr_sparse_vec_mul(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_vec_add_other(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_sub_other(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_mul_other(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx2, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_other_add_sparse_vec(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, gr_ctx_t ctx1, const gr_sparse_vec_t src2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_other_sub_sparse_vec(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, gr_ctx_t ctx1, const gr_sparse_vec_t src2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_other_mul_sparse_vec(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, gr_ctx_t ctx1, const gr_sparse_vec_t src2, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_vec_addmul_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_submul_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_addmul_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_submul_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx);
+
+/**
+ * Arithmetic into dense vectors
+**/
+
+WARN_UNUSED_RESULT int gr_vec_update_sparse_vec_nz(gr_ptr dres, const gr_sparse_vec_t src, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_vec_add_sparse_vec(gr_ptr dres, gr_srcptr dvec1, const gr_sparse_vec_t svec2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_vec_sub_sparse_vec(gr_ptr dres, gr_srcptr dvec1, const gr_sparse_vec_t svec2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_vec_mul_sparse_vec_nz(gr_ptr dres, gr_srcptr dvec1, const gr_sparse_vec_t svec2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_vec_div_sparse_vec_nz(gr_ptr dres, gr_srcptr dvec1, const gr_sparse_vec_t svec2, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_vec_addmul_sparse_vec_scalar(gr_ptr dres, const gr_sparse_vec_t svec, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_vec_submul_sparse_vec_scalar(gr_ptr dres, const gr_sparse_vec_t svec, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_vec_addmul_sparse_vec_scalar_si(gr_ptr dres, const gr_sparse_vec_t svec, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_vec_submul_sparse_vec_scalar_si(gr_ptr dres, const gr_sparse_vec_t svec, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_vec_addmul_sparse_vec_scalar_fmpz(gr_ptr dres, const gr_sparse_vec_t svec, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_vec_submul_sparse_vec_scalar_fmpz(gr_ptr dres, const gr_sparse_vec_t svec, const fmpz_t c, gr_ctx_t ctx);
+
+/**
+ * Scalar multiplication and division
+**/
+
+WARN_UNUSED_RESULT int gr_sparse_vec_mul_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_mul_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_mul_scalar_ui(gr_sparse_vec_t dst, const gr_sparse_vec_t src, ulong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_mul_scalar_fmpz(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_mul_scalar_fmpq(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpq_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_mul_scalar_2exp_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_vec_div_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_div_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_div_scalar_ui(gr_sparse_vec_t dst, const gr_sparse_vec_t src, ulong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_div_scalar_fmpz(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_div_scalar_fmpq(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpq_t c, gr_ctx_t ctx);
+
+WARN_UNUSED_RESULT int gr_sparse_vec_divexact_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_divexact_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_divexact_scalar_ui(gr_sparse_vec_t dst, const gr_sparse_vec_t src, ulong c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_divexact_scalar_fmpz(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpz_t c, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_divexact_scalar_fmpq(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpq_t c, gr_ctx_t ctx);
+
+/**
+ * Sum and product
+**/
+
+GR_SPARSE_VEC_INLINE WARN_UNUSED_RESULT int
+gr_sparse_vec_sum(gr_ptr dst, const gr_sparse_vec_t src, gr_ctx_t ctx)
+{ return _gr_vec_sum(dst, src->nzs, src->nnz, ctx); }
+
+GR_SPARSE_VEC_INLINE WARN_UNUSED_RESULT int
+gr_sparse_vec_nz_product(gr_ptr dst, const gr_sparse_vec_t src, gr_ctx_t ctx)
+{ return _gr_vec_product(dst, src->nzs, src->nnz, ctx); }
+
+/**
+ * Dot product
+**/
+
+WARN_UNUSED_RESULT int gr_sparse_vec_dot(gr_ptr dst, gr_srcptr initial, int subtract, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx);
+WARN_UNUSED_RESULT int gr_sparse_vec_dot_vec(gr_ptr dst, gr_srcptr initial, int subtract, const gr_sparse_vec_t src1, gr_srcptr src2, gr_ctx_t ctx);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gr_sparse_vec/arith.c b/src/gr_sparse_vec/arith.c
new file mode 100644
index 0000000000..4ebd048e55
--- /dev/null
+++ b/src/gr_sparse_vec/arith.c
@@ -0,0 +1,397 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+ Copyright (C) 2023 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <string.h>
+#include "gr_sparse_vec.h"
+
+/*
+ Binary arithmetic operations on sparse vectors have a common pattern of
+ riffling through the two input vectors A_VEC and B_VEC to produce an
+ output vector DEST_VEC with indices in sorted order. As the iteration
+ proceeds, for a given index, we need one function FUNC_A if only A_VEC
+ has a nonzero element at that index, another function FUNC_B if only
+ B_VEC has a nonzero element, and a third function FUNC_AB if both are nonzero.
+ During this process, we maintain a triple of indices a_nz_idx, b_nz_idx, and
+ dest_nz_idx, which are used when calling this macro to indicate what element(s)
+ to refer to when calling the appropriate function.
+*/
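+/*
+ For example, if A_VEC has nonzeros at indices {0, 2} and B_VEC at {1, 2},
+ the riffle visits index 2 (calling FUNC_AB), then 1 (FUNC_B), then 0 (FUNC_A),
+ producing DEST_VEC indices {0, 1, 2}. The iteration runs backward through
+ the output so that DEST_VEC may alias one of the inputs.
+*/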
+
+// Sub macro to swap two entries with their associated indices
+#define GR_SPV_SWAP_INDS(VEC, I, J, SZ, CTX) \
+{ \
+ slong _temp = (VEC)->inds[I]; \
+ (VEC)->inds[I] = (VEC)->inds[J]; \
+ (VEC)->inds[J] = _temp; \
+ gr_swap(GR_ENTRY((VEC)->nzs, (I), (SZ)), GR_ENTRY((VEC)->nzs, (J), (SZ)), (CTX)); \
+}
+
+#define GR_SPV_RFL_TEMPLATE(FUNC_A, FUNC_B, FUNC_AB, DEST_VEC, A_VEC, B_VEC, CTX) \
+ int status; \
+ slong sz, new_nnz, a_nz_idx, b_nz_idx, dest_nz_idx, a_nnz, b_nnz, i; \
+ ulong a_ind, b_ind; \
+ if ((DEST_VEC)->length != (A_VEC)->length || (A_VEC)->length != (B_VEC)->length) \
+ return GR_DOMAIN; \
+ status = GR_SUCCESS; \
+ sz = (CTX)->sizeof_elem; \
+ a_nnz = (A_VEC)->nnz; \
+ b_nnz = (B_VEC)->nnz; \
+ new_nnz = _gr_sparse_vec_count_unique_inds((A_VEC)->inds, a_nnz, (B_VEC)->inds, b_nnz); \
+ gr_sparse_vec_fit_nnz((DEST_VEC), new_nnz, (CTX)); \
+ /* We go backward through the destination, because it might be an in-place operation on a source */ \
+ a_nz_idx = a_nnz-1; \
+ b_nz_idx = b_nnz-1; \
+ dest_nz_idx = new_nnz-1; \
+ while (a_nz_idx >= -1 && b_nz_idx >= -1 && status == GR_SUCCESS) \
+ { \
+ if (a_nz_idx == -1 && b_nz_idx == -1) break; \
+ a_ind = (a_nz_idx >= 0) ? (A_VEC)->inds[a_nz_idx] : -1; \
+ b_ind = (b_nz_idx >= 0) ? (B_VEC)->inds[b_nz_idx] : -1; \
+ if (b_nz_idx == -1 || (a_nz_idx >= 0 && a_ind > b_ind)) \
+ { \
+ status |= (FUNC_A); \
+ (DEST_VEC)->inds[dest_nz_idx] = a_ind; \
+ a_nz_idx--; \
+ } \
+ else if (a_nz_idx == -1 || b_ind > a_ind) \
+ { \
+ status |= (FUNC_B); \
+ (DEST_VEC)->inds[dest_nz_idx] = b_ind; \
+ b_nz_idx--; \
+ } \
+ else \
+ { \
+ status |= (FUNC_AB); \
+ (DEST_VEC)->inds[dest_nz_idx] = a_ind; \
+ a_nz_idx--; \
+ b_nz_idx--; \
+ } \
+ if (T_TRUE != gr_is_zero(GR_ENTRY((DEST_VEC)->nzs, dest_nz_idx, sz), (CTX))) \
+ dest_nz_idx--; \
+ } \
+ /* Move the result to the beginning of the dest vec */ \
+ /* Currently, dest_nz_idx points to one before the start of the legit destination values */ \
+ if (dest_nz_idx >= 0 && !status) \
+ { \
+ new_nnz = (new_nnz-1) - dest_nz_idx; \
+ dest_nz_idx++; \
+ for (i = 0; i < new_nnz; i++) \
+ GR_SPV_SWAP_INDS(DEST_VEC, i, dest_nz_idx + i, sz, CTX); \
+ } \
+ (DEST_VEC)->nnz = new_nnz; \
+ return status;
+
+/* We need some convenience functions for certain simple operations. The two
+   calls in each are sequenced explicitly: both write to dst, and the operands
+   of | would otherwise be evaluated in an unspecified order. */
+int gr_neg_other(gr_ptr dst, gr_srcptr src, gr_ctx_t src_ctx, gr_ctx_t ctx)
+{ int status = gr_set_other(dst, src, src_ctx, ctx); return status | gr_neg(dst, dst, ctx); }
+
+int gr_negmul(gr_ptr dst, gr_srcptr x, gr_srcptr y, gr_ctx_t ctx)
+{ int status = gr_mul(dst, x, y, ctx); return status | gr_neg(dst, dst, ctx); }
+
+int gr_negmul_si(gr_ptr dst, gr_srcptr x, slong y, gr_ctx_t ctx)
+{ int status = gr_mul_si(dst, x, y, ctx); return status | gr_neg(dst, dst, ctx); }
+
+// Need a zero-ary function to assign zero
+#define GR_SPV_RFL_ZERO gr_zero(GR_ENTRY(dst->nzs, dest_nz_idx, sz), ctx)
+
+// Convenience macros for applying a unary or binary function
+#define GR_SPV_RFL_UOP(F, DST, DST_IND, SRC, SRC_IND) \
+ F(GR_ENTRY((DST)->nzs, DST_IND, sz), GR_ENTRY((SRC)->nzs, SRC_IND, sz), ctx)
+#define GR_SPV_RFL_BOP(F, DST, DST_IND, SRC1, SRC1_IND, SRC2, SRC2_IND) \
+ F(GR_ENTRY((DST)->nzs, DST_IND, sz), GR_ENTRY((SRC1)->nzs, SRC1_IND, sz), GR_ENTRY((SRC2)->nzs, SRC2_IND, sz), ctx)
+#define GR_SPV_RFL_BOP_SCALAR(F, DST, DST_IND, SRC, SRC_IND, C, CTX) \
+ F(GR_ENTRY((DST)->nzs, DST_IND, sz), GR_ENTRY((SRC)->nzs, SRC_IND, sz), C, CTX)
+
+// Analogous macros for applying a unary or binary function between two contexts
+#define GR_SPV_RFL_UOP_OTHER(F, DST, DST_IND, SRC, SRC_IND, CTX2) \
+ F(GR_ENTRY((DST)->nzs, DST_IND, sz), GR_ENTRY((SRC)->nzs, SRC_IND, (CTX2)->sizeof_elem), CTX2, ctx)
+#define GR_SPV_RFL_BOP_OTHER(F, DST, DST_IND, SRC1, SRC1_IND, SRC2, SRC2_IND, CTX2) \
+ F(GR_ENTRY((DST)->nzs, DST_IND, sz), GR_ENTRY((SRC1)->nzs, SRC1_IND, sz), GR_ENTRY((SRC2)->nzs, SRC2_IND, (CTX2)->sizeof_elem), CTX2, ctx)
+#define GR_SPV_RFL_OTHER_BOP(F, DST, DST_IND, SRC1, SRC1_IND, CTX2, SRC2, SRC2_IND) \
+ F(GR_ENTRY((DST)->nzs, DST_IND, sz), GR_ENTRY((SRC1)->nzs, SRC1_IND, CTX2->sizeof_elem), (CTX2), GR_ENTRY((SRC2)->nzs, SRC2_IND, sz), ctx)
+
+/*
+ Subtemplate for doing accumulated operation from one sparse vector into another.
+ Used to do dst += c * src and dst -= c * src, for c a scalar.
+*/
+#define GR_SPV_ACCUM_TEMPLATE(ELEM_OP, ELEM_ACCUM_OP, DST, SRC, C, CTX) \
+ GR_SPV_RFL_TEMPLATE( \
+ GR_SPV_RFL_UOP(gr_set, DST, dest_nz_idx, DST, a_nz_idx), \
+ GR_SPV_RFL_BOP_SCALAR(ELEM_OP, DST, dest_nz_idx, SRC, b_nz_idx, C, CTX), \
+ GR_SPV_RFL_UOP(gr_set, DST, dest_nz_idx, DST, a_nz_idx) | GR_SPV_RFL_BOP_SCALAR(ELEM_ACCUM_OP, DST, dest_nz_idx, SRC, b_nz_idx, C, CTX), \
+ DST, DST, SRC, CTX\
+ )
+
+int gr_sparse_vec_neg(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_ctx_t ctx)
+{ int status = gr_sparse_vec_set(dst, src, ctx); return status | _gr_vec_neg(dst->nzs, dst->nzs, dst->nnz, ctx); }
+
+int
+gr_sparse_vec_update(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_ctx_t ctx)
+{
+ GR_SPV_RFL_TEMPLATE(
+ GR_SPV_RFL_UOP(gr_set, dst, dest_nz_idx, dst, a_nz_idx),
+ GR_SPV_RFL_UOP(gr_set, dst, dest_nz_idx, src, b_nz_idx),
+ GR_SPV_RFL_UOP(gr_set, dst, dest_nz_idx, src, b_nz_idx),
+ dst, dst, src, ctx
+ );
+}
+
+int
+gr_sparse_vec_add(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx)
+{
+ GR_SPV_RFL_TEMPLATE(
+ GR_SPV_RFL_UOP(gr_set, dst, dest_nz_idx, src1, a_nz_idx),
+ GR_SPV_RFL_UOP(gr_set, dst, dest_nz_idx, src2, b_nz_idx),
+ GR_SPV_RFL_BOP(gr_add, dst, dest_nz_idx, src1, a_nz_idx, src2, b_nz_idx),
+ dst, src1, src2, ctx
+ );
+}
+
+int
+gr_sparse_vec_sub(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx)
+{
+ GR_SPV_RFL_TEMPLATE(
+ GR_SPV_RFL_UOP(gr_set, dst, dest_nz_idx, src1, a_nz_idx),
+ GR_SPV_RFL_UOP(gr_neg, dst, dest_nz_idx, src2, b_nz_idx),
+ GR_SPV_RFL_BOP(gr_sub, dst, dest_nz_idx, src1, a_nz_idx, src2, b_nz_idx),
+ dst, src1, src2, ctx
+ );
+}
+
+int gr_sparse_vec_mul(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx)
+{
+ GR_SPV_RFL_TEMPLATE(
+ GR_SPV_RFL_ZERO,
+ GR_SPV_RFL_ZERO,
+ GR_SPV_RFL_BOP(gr_mul, dst, dest_nz_idx, src1, a_nz_idx, src2, b_nz_idx),
+ dst, src1, src2, ctx
+ );
+}
+
+int
+gr_sparse_vec_add_other(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx2, gr_ctx_t ctx)
+{
+ GR_SPV_RFL_TEMPLATE(
+ GR_SPV_RFL_UOP(gr_set, dst, dest_nz_idx, src1, a_nz_idx),
+ GR_SPV_RFL_UOP_OTHER(gr_set_other, dst, dest_nz_idx, src2, b_nz_idx, ctx2),
+ GR_SPV_RFL_BOP_OTHER(gr_add_other, dst, dest_nz_idx, src1, a_nz_idx, src2, b_nz_idx, ctx2),
+ dst, src1, src2, ctx
+ );
+}
+
+int
+gr_sparse_vec_sub_other(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx2, gr_ctx_t ctx)
+{
+ GR_SPV_RFL_TEMPLATE(
+ GR_SPV_RFL_UOP(gr_set, dst, dest_nz_idx, src1, a_nz_idx),
+ GR_SPV_RFL_UOP_OTHER(gr_neg_other, dst, dest_nz_idx, src2, b_nz_idx, ctx2),
+ GR_SPV_RFL_BOP_OTHER(gr_sub_other, dst, dest_nz_idx, src1, a_nz_idx, src2, b_nz_idx, ctx2),
+ dst, src1, src2, ctx
+ );
+}
+
+int
+gr_sparse_vec_mul_other(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx2, gr_ctx_t ctx)
+{
+ GR_SPV_RFL_TEMPLATE(
+ GR_SPV_RFL_ZERO,
+ GR_SPV_RFL_ZERO,
+ GR_SPV_RFL_BOP_OTHER(gr_mul_other, dst, dest_nz_idx, src1, a_nz_idx, src2, b_nz_idx, ctx2),
+ dst, src1, src2, ctx
+ );
+}
+
+int gr_other_add_sparse_vec(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, gr_ctx_t ctx1, const gr_sparse_vec_t src2, gr_ctx_t ctx)
+{
+ GR_SPV_RFL_TEMPLATE(
+ GR_SPV_RFL_UOP_OTHER(gr_set_other, dst, dest_nz_idx, src1, a_nz_idx, ctx1),
+ GR_SPV_RFL_UOP(gr_set, dst, dest_nz_idx, src2, b_nz_idx),
+ GR_SPV_RFL_OTHER_BOP(gr_other_add, dst, dest_nz_idx, src1, a_nz_idx, ctx1, src2, b_nz_idx),
+ dst, src1, src2, ctx
+ );
+}
+
+int gr_other_sub_sparse_vec(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, gr_ctx_t ctx1, const gr_sparse_vec_t src2, gr_ctx_t ctx)
+{
+ GR_SPV_RFL_TEMPLATE(
+ GR_SPV_RFL_UOP_OTHER(gr_set_other, dst, dest_nz_idx, src1, a_nz_idx, ctx1),
+ GR_SPV_RFL_UOP(gr_neg, dst, dest_nz_idx, src2, b_nz_idx),
+ GR_SPV_RFL_OTHER_BOP(gr_other_sub, dst, dest_nz_idx, src1, a_nz_idx, ctx1, src2, b_nz_idx),
+ dst, src1, src2, ctx
+ );
+}
+
+int gr_other_mul_sparse_vec(gr_sparse_vec_t dst, const gr_sparse_vec_t src1, gr_ctx_t ctx1, const gr_sparse_vec_t src2, gr_ctx_t ctx)
+{
+ GR_SPV_RFL_TEMPLATE(
+ GR_SPV_RFL_ZERO,
+ GR_SPV_RFL_ZERO,
+ GR_SPV_RFL_OTHER_BOP(gr_other_mul, dst, dest_nz_idx, src1, a_nz_idx, ctx1, src2, b_nz_idx),
+ dst, src1, src2, ctx
+ );
+}
+
+int gr_sparse_vec_addmul_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_SPV_ACCUM_TEMPLATE(gr_mul, gr_addmul, dst, src, c, ctx) }
+
+int gr_sparse_vec_submul_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_SPV_ACCUM_TEMPLATE(gr_negmul, gr_submul, dst, src, c, ctx) }
+
+int gr_sparse_vec_addmul_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx)
+{ GR_SPV_ACCUM_TEMPLATE(gr_mul_si, gr_addmul_si, dst, src, c, ctx) }
+
+int gr_sparse_vec_submul_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx)
+{ GR_SPV_ACCUM_TEMPLATE(gr_negmul_si, gr_submul_si, dst, src, c, ctx); }
+
+/**
+ * Arithmetic into dense vectors
+**/
+
+// Internal macro to update a dense vector by iterating over a sparse one
+#define GR_SPV_INTO_DENSE_TEMPLATE(FUNC, SVEC, CTX) \
+ slong i; \
+ slong sz = (CTX)->sizeof_elem; \
+ int status = GR_SUCCESS; \
+ slong nnz = (SVEC)->nnz; \
+ for (i = 0; i < nnz; i++) \
+ { \
+ status |= (FUNC); \
+ } \
+ return status;
+
+// Sub-macro for applying operation (dense, sparse) -> dense
+#define GR_SPV_OP_ON_DENSE_TEMPLATE(ELEM_OP, DRES, DVEC, SVEC, CTX) \
+ GR_SPV_INTO_DENSE_TEMPLATE(ELEM_OP(\
+ GR_ENTRY((DRES), (SVEC)->inds[i], sz), \
+ GR_ENTRY(DVEC, (SVEC)->inds[i], sz), \
+ GR_ENTRY((SVEC)->nzs, i, sz), \
+ CTX), SVEC, CTX)
+
+// Sub-macro for accumulating operation on dense from sparse
+#define GR_SPV_ACCUM_INTO_DENSE_TEMPLATE(ELEM_OP, DRES, SVEC, C, CTX) \
+ GR_SPV_INTO_DENSE_TEMPLATE(ELEM_OP(\
+ GR_ENTRY(DRES, (SVEC)->inds[i], sz), \
+ GR_ENTRY((SVEC)->nzs, i, sz),\
+ C, CTX), SVEC, CTX)
+
+int gr_vec_update_sparse_vec_nz(gr_ptr dres, const gr_sparse_vec_t src, gr_ctx_t ctx)
+{ GR_SPV_INTO_DENSE_TEMPLATE(gr_set(GR_ENTRY(dres, src->inds[i], sz), GR_ENTRY(src->nzs, i, sz), ctx), src, ctx) }
+
+int gr_vec_add_sparse_vec(gr_ptr dres, gr_srcptr dvec1, const gr_sparse_vec_t svec2, gr_ctx_t ctx)
+{ GR_SPV_OP_ON_DENSE_TEMPLATE(gr_add, dres, dvec1, svec2, ctx) }
+
+int gr_vec_sub_sparse_vec(gr_ptr dres, gr_srcptr dvec1, const gr_sparse_vec_t svec2, gr_ctx_t ctx)
+{ GR_SPV_OP_ON_DENSE_TEMPLATE(gr_sub, dres, dvec1, svec2, ctx) }
+
+int gr_vec_mul_sparse_vec_nz(gr_ptr dres, gr_srcptr dvec1, const gr_sparse_vec_t svec2, gr_ctx_t ctx)
+{ GR_SPV_OP_ON_DENSE_TEMPLATE(gr_mul, dres, dvec1, svec2, ctx) }
+
+int gr_vec_div_sparse_vec_nz(gr_ptr dres, gr_srcptr dvec1, const gr_sparse_vec_t svec2, gr_ctx_t ctx)
+{ GR_SPV_OP_ON_DENSE_TEMPLATE(gr_div, dres, dvec1, svec2, ctx) }
+
+int gr_vec_addmul_sparse_vec_scalar(gr_ptr dres, const gr_sparse_vec_t svec, gr_srcptr c, gr_ctx_t ctx)
+{ GR_SPV_ACCUM_INTO_DENSE_TEMPLATE(gr_addmul, dres, svec, c, ctx) }
+
+int gr_vec_submul_sparse_vec_scalar(gr_ptr dres, const gr_sparse_vec_t svec, gr_srcptr c, gr_ctx_t ctx)
+{ GR_SPV_ACCUM_INTO_DENSE_TEMPLATE(gr_submul, dres, svec, c, ctx) }
+
+int gr_vec_addmul_sparse_vec_scalar_si(gr_ptr dres, const gr_sparse_vec_t svec, slong c, gr_ctx_t ctx)
+{ GR_SPV_ACCUM_INTO_DENSE_TEMPLATE(gr_addmul_si, dres, svec, c, ctx) }
+
+int gr_vec_submul_sparse_vec_scalar_si(gr_ptr dres, const gr_sparse_vec_t svec, slong c, gr_ctx_t ctx)
+{ GR_SPV_ACCUM_INTO_DENSE_TEMPLATE(gr_submul_si, dres, svec, c, ctx) }
+
+int gr_vec_addmul_sparse_vec_scalar_fmpz(gr_ptr dres, const gr_sparse_vec_t svec, const fmpz_t c, gr_ctx_t ctx)
+{ GR_SPV_ACCUM_INTO_DENSE_TEMPLATE(gr_addmul_fmpz, dres, svec, c, ctx) }
+
+int gr_vec_submul_sparse_vec_scalar_fmpz(gr_ptr dres, const gr_sparse_vec_t svec, const fmpz_t c, gr_ctx_t ctx)
+{ GR_SPV_ACCUM_INTO_DENSE_TEMPLATE(gr_submul_fmpz, dres, svec, c, ctx) }
+
+/**
+ * Scalar multiplication and division
+**/
+
+#define GR_SPARSE_VEC_DENSE_VEC_OP(dense_vec_op, dst, src, c, ctx) \
+    if (dst->length != src->length) \
+ { \
+ return GR_DOMAIN; \
+ } \
+    if (dst != src) \
+ { \
+ gr_sparse_vec_fit_nnz(dst, src->nnz, ctx); \
+ dst->nnz = src->nnz; \
+ memcpy(dst->inds, src->inds, src->nnz*sizeof(slong)); \
+ } \
+ return dense_vec_op(dst->nzs, src->nzs, src->nnz, c, ctx); \
+
+int
+gr_sparse_vec_mul_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_mul_scalar, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_mul_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_mul_scalar_si, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_mul_scalar_ui(gr_sparse_vec_t dst, const gr_sparse_vec_t src, ulong c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_mul_scalar_ui, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_mul_scalar_fmpz(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpz_t c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_mul_scalar_fmpz, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_mul_scalar_fmpq(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpq_t c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_mul_scalar_fmpq, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_mul_scalar_2exp_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_mul_scalar_2exp_si, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_div_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_div_scalar, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_div_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_div_scalar_si, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_div_scalar_ui(gr_sparse_vec_t dst, const gr_sparse_vec_t src, ulong c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_div_scalar_ui, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_div_scalar_fmpz(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpz_t c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_div_scalar_fmpz, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_div_scalar_fmpq(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpq_t c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_div_scalar_fmpq, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_divexact_scalar(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_srcptr c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_divexact_scalar, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_divexact_scalar_si(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_divexact_scalar_si, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_divexact_scalar_ui(gr_sparse_vec_t dst, const gr_sparse_vec_t src, ulong c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_divexact_scalar_ui, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_divexact_scalar_fmpz(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpz_t c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_divexact_scalar_fmpz, dst, src, c, ctx) }
+
+int
+gr_sparse_vec_divexact_scalar_fmpq(gr_sparse_vec_t dst, const gr_sparse_vec_t src, const fmpq_t c, gr_ctx_t ctx)
+{ GR_SPARSE_VEC_DENSE_VEC_OP(_gr_vec_divexact_scalar_fmpq, dst, src, c, ctx) }
diff --git a/src/gr_sparse_vec/count_unique_inds.c b/src/gr_sparse_vec/count_unique_inds.c
new file mode 100644
index 0000000000..c59d9cfc6f
--- /dev/null
+++ b/src/gr_sparse_vec/count_unique_inds.c
@@ -0,0 +1,24 @@
+#include "gr_sparse_vec.h"
+
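+/* Counts the size of the union of two sorted index lists, e.g.
+   inds0 = {0, 2, 5} and inds1 = {2, 3} give 4 (the union being {0, 2, 3, 5}).
+   This is the number of nonzero slots the riffle template must reserve. */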
+slong
+_gr_sparse_vec_count_unique_inds(const ulong *inds0, slong nnz0, const ulong *inds1, slong nnz1)
+{
+ slong ind0 = 0;
+ slong ind1 = 0;
+ slong count = 0;
+ while (ind0 < nnz0 && ind1 < nnz1)
+ {
+ slong col0 = inds0[ind0];
+ slong col1 = inds1[ind1];
+ if (col0 <= col1)
+ ind0++;
+ if (col0 >= col1)
+ ind1++;
+ count++;
+ }
+ if (ind0 < nnz0)
+ count += (nnz0 - ind0);
+ else if (ind1 < nnz1)
+ count += (nnz1 - ind1);
+ return count;
+}
diff --git a/src/gr_sparse_vec/dot.c b/src/gr_sparse_vec/dot.c
new file mode 100644
index 0000000000..f0ef1b91e5
--- /dev/null
+++ b/src/gr_sparse_vec/dot.c
@@ -0,0 +1,60 @@
+/*
+ Copyright (C) 2023 Fredrik Johansson
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include <stdlib.h>
+#include "gr_sparse_vec.h"
+
+int gr_sparse_vec_dot(gr_ptr dst, gr_srcptr initial, int subtract, const gr_sparse_vec_t src1, const gr_sparse_vec_t src2, gr_ctx_t ctx)
+{
+ int status;
+ slong nz_idx1, nz_idx2;
+ slong sz = ctx->sizeof_elem;
+
+ if (src1->length != src2->length)
+ {
+ return GR_DOMAIN;
+ }
+ status = gr_set(dst, initial, ctx);
+ for (nz_idx1 = 0, nz_idx2 = 0; nz_idx1 < src1->nnz && nz_idx2 < src2->nnz; )
+ {
+ if (src1->inds[nz_idx1] < src2->inds[nz_idx2])
+ nz_idx1++;
+ else if (src1->inds[nz_idx1] > src2->inds[nz_idx2])
+ nz_idx2++;
+ else {
+ if (subtract)
+ status |= gr_submul(dst, GR_ENTRY(src1->nzs, nz_idx1, sz), GR_ENTRY(src2->nzs, nz_idx2, sz), ctx);
+ else
+ status |= gr_addmul(dst, GR_ENTRY(src1->nzs, nz_idx1, sz), GR_ENTRY(src2->nzs, nz_idx2, sz), ctx);
+ nz_idx1++, nz_idx2++;
+ }
+ }
+ return status;
+}
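+
+/*
+ * Usage sketch (hypothetical names): fold the dot product <u, v> into an
+ * accumulator t over the same context, subtracting it:
+ *
+ *     status |= gr_sparse_vec_dot(t, t, 1, u, v, ctx);
+ *
+ * Only indices present in both sorted nonzero lists contribute, so the merge
+ * above costs at most O(src1->nnz + src2->nnz) ring operations.
+ */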
+
+int gr_sparse_vec_dot_vec(gr_ptr dst, gr_srcptr initial, int subtract, const gr_sparse_vec_t src1, gr_srcptr src2, gr_ctx_t ctx)
+{
+ int status;
+ slong nz_idx;
+ slong sz = ctx->sizeof_elem;
+
+ status = gr_set(dst, initial, ctx);
+ for (nz_idx = 0; nz_idx < src1->nnz; nz_idx++)
+ {
+ if (subtract)
+ status |= gr_submul(dst, GR_ENTRY(src1->nzs, nz_idx, sz), GR_ENTRY(src2, src1->inds[nz_idx], sz), ctx);
+ else
+ status |= gr_addmul(dst, GR_ENTRY(src1->nzs, nz_idx, sz), GR_ENTRY(src2, src1->inds[nz_idx], sz), ctx);
+ }
+ return status;
+}
diff --git a/src/gr_sparse_vec/equal.c b/src/gr_sparse_vec/equal.c
new file mode 100644
index 0000000000..77d9d89aec
--- /dev/null
+++ b/src/gr_sparse_vec/equal.c
@@ -0,0 +1,46 @@
+#include "gr_sparse_vec.h"
+
+truth_t
+gr_sparse_vec_equal(const gr_sparse_vec_t vec1, const gr_sparse_vec_t vec2, gr_ctx_t ctx)
+{
+ slong i1, i2, sz;
+ truth_t cur_test;
+ truth_t ret = T_TRUE;
+ gr_method_unary_predicate is_zero = GR_UNARY_PREDICATE(ctx, IS_ZERO);
+
+ if (vec1->length != vec2->length)
+ return T_FALSE;
+ sz = ctx->sizeof_elem;
+
+ for (i1 = 0, i2 = 0; i1 < vec1->nnz && i2 < vec2->nnz; )
+ {
+ if (vec1->inds[i1] < vec2->inds[i2])
+ {
+ // In vector => either known or maybe nonzero
+ if (is_zero(GR_ENTRY(vec1->nzs, i1, sz), ctx) == T_FALSE)
+ return T_FALSE;
+ else
+ ret = T_UNKNOWN; // Have maybe zero vs known zero
+ i1++;
+ }
+ else if (vec1->inds[i1] > vec2->inds[i2])
+ {
+ if (is_zero(GR_ENTRY(vec2->nzs, i2, sz), ctx) == T_FALSE)
+ return T_FALSE;
+ else
+ ret = T_UNKNOWN; // Have maybe zero vs known zero
+ i2++;
+ }
+ else
+ {
+ cur_test = gr_equal(GR_ENTRY(vec1->nzs, i1, sz), GR_ENTRY(vec2->nzs, i2, sz), ctx);
+ if (cur_test == T_FALSE)
+ return T_FALSE;
+ else if (cur_test == T_UNKNOWN)
+ ret = T_UNKNOWN;
+ i1++; i2++;
+ }
+ }
+
+ return ret;
+}
diff --git a/src/gr_sparse_vec/fit_nnz.c b/src/gr_sparse_vec/fit_nnz.c
new file mode 100644
index 0000000000..5ae7b4c348
--- /dev/null
+++ b/src/gr_sparse_vec/fit_nnz.c
@@ -0,0 +1,23 @@
+#include "gr_sparse_vec.h"
+
+void
+gr_sparse_vec_fit_nnz(gr_sparse_vec_t vec, slong nnz, gr_ctx_t ctx)
+{
+ slong alloc = vec->alloc;
+ slong new_alloc = nnz;
+ /* It doesn't make sense to allocate more than the ambient dimension */
+ if (new_alloc > vec->length)
+ new_alloc = vec->length;
+ if (new_alloc > alloc)
+ {
+ slong sz = ctx->sizeof_elem;
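+        /* Grow geometrically (at least doubling, capped at the length) so that
+           repeated single-entry insertions need amortized O(1) reallocations. */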
+ if (new_alloc < 2 * alloc)
+ new_alloc = 2 * alloc;
+ if (new_alloc > vec->length)
+ new_alloc = vec->length;
+ vec->inds = flint_realloc(vec->inds, new_alloc * sizeof(ulong));
+ vec->nzs = flint_realloc(vec->nzs, new_alloc * sz);
+ _gr_vec_init(GR_ENTRY(vec->nzs, alloc, sz), new_alloc - alloc, ctx);
+ vec->alloc = new_alloc;
+ }
+}
diff --git a/src/gr_sparse_vec/from_dense.c b/src/gr_sparse_vec/from_dense.c
new file mode 100644
index 0000000000..bdaefbef3e
--- /dev/null
+++ b/src/gr_sparse_vec/from_dense.c
@@ -0,0 +1,29 @@
+#include "gr_sparse_vec.h"
+
+int
+gr_sparse_vec_set_vec(gr_sparse_vec_t vec, gr_srcptr src, slong len, gr_ctx_t ctx)
+{
+    slong nnz, i, sz;
+    int status;
+ sz = ctx->sizeof_elem;
+ nnz = 0;
+ status = GR_SUCCESS;
+ for (i = 0; i < len; i++)
+ {
+ if (T_TRUE != gr_is_zero(GR_ENTRY(src, i, sz), ctx))
+ nnz++;
+ }
+ gr_sparse_vec_fit_nnz(vec, nnz, ctx);
+ nnz = 0;
+ for (i = 0; i < len; i++)
+ {
+ if (T_TRUE != gr_is_zero(GR_ENTRY(src, i, sz), ctx))
+ {
+ vec->inds[nnz] = i;
+ status |= gr_set(GR_ENTRY(vec->nzs, nnz, sz), GR_ENTRY(src, i, sz), ctx);
+ nnz++;
+ }
+ }
+ vec->length = len;
+ vec->nnz = nnz;
+ return status;
+}
diff --git a/src/gr_sparse_vec/from_entries.c b/src/gr_sparse_vec/from_entries.c
new file mode 100644
index 0000000000..c38ff76346
--- /dev/null
+++ b/src/gr_sparse_vec/from_entries.c
@@ -0,0 +1,91 @@
+#include <stdlib.h>
+#include <string.h>
+#include "gr_sparse_vec.h"
+
+typedef struct
+{
+ slong i;
+ slong ind;
+}
+sparse_vec_index_t;
+
+static int sparse_vec_index_cmp(const void* a, const void* b)
+{
+ slong aind = ((sparse_vec_index_t*)(a))->ind;
+ slong bind = ((sparse_vec_index_t*)(b))->ind;
+ return (aind < bind ? -1 : (aind > bind ? 1 : 0));
+}
+
+
+static sparse_vec_index_t * _sort_inds(ulong * inds, slong num)
+{
+ slong i;
+ sparse_vec_index_t * si;
+
+ si = flint_malloc(num * sizeof(sparse_vec_index_t));
+ for (i = 0; i < num; i++)
+ {
+ si[i].i = i;
+ si[i].ind = inds[i];
+ }
+
+ qsort(si, num, sizeof(sparse_vec_index_t), sparse_vec_index_cmp);
+ return si;
+}
+
+int
+gr_sparse_vec_from_entries(gr_sparse_vec_t vec, ulong * inds, gr_srcptr entries, slong nnz, truth_t is_canonical, gr_ctx_t ctx)
+{
+ slong i;
+ slong sz = ctx->sizeof_elem;
+ int status = GR_SUCCESS;
+ sparse_vec_index_t *si;
+ gr_ptr vec_entry, entry;
+ gr_method_unary_predicate is_zero = GR_UNARY_PREDICATE(ctx, IS_ZERO);
+
+ for (i = 0; i < nnz; ++i)
+ if (inds[i] >= vec->length)
+ return GR_DOMAIN;
+
+ gr_sparse_vec_fit_nnz(vec, nnz, ctx);
+ if (is_canonical == T_TRUE)
+ {
+ // Just copy data
+ memcpy(vec->inds, inds, nnz * sizeof(ulong));
+ status |= _gr_vec_set(vec->nzs, entries, nnz, ctx);
+ vec->nnz = nnz;
+ }
+ else
+ {
+ si = _sort_inds(inds, nnz);
+ vec->nnz = 0;
+ vec_entry = NULL;
+ for(i = 0; i < nnz; ++i)
+ {
+ entry = GR_ENTRY(entries, si[i].i, sz);
+
+ // If index is repeated, accumulate into current vector entry
+ if (i > 0 && si[i].ind == si[i-1].ind)
+ status |= gr_add(vec_entry, vec_entry, entry, ctx);
+ else
+ {
+ // If current entry is empty or nonzero, move to next one
+ if (vec_entry == NULL || is_zero(vec_entry, ctx) != T_TRUE)
+ {
+ vec_entry = GR_ENTRY(vec->nzs, vec->nnz, sz);
+ ++vec->nnz;
+ }
+ vec->inds[vec->nnz-1] = si[i].ind;
+ status |= gr_set(vec_entry, entry, ctx);
+ }
+ }
+ // Check if last entry accumulated to zero
+ if (vec_entry != NULL && is_zero(vec_entry, ctx) == T_TRUE)
+ vec->nnz--;
+
+ flint_free(si);
+ }
+ return status;
+}
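+
+/*
+ * Example: inds = {3, 1, 3} with entries {a, b, c} and is_canonical == T_FALSE
+ * sorts to index order {1, 3, 3} and yields inds = {1, 3}, values {b, a + c};
+ * if a + c happens to be zero, the final check above drops the trailing entry.
+ */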
+
+
+
diff --git a/src/gr_sparse_vec/get_entry.c b/src/gr_sparse_vec/get_entry.c
new file mode 100644
index 0000000000..953fac1cc4
--- /dev/null
+++ b/src/gr_sparse_vec/get_entry.c
@@ -0,0 +1,37 @@
+#include <stdlib.h>
+#include "gr_sparse_vec.h"
+
+
+static int gr_sparse_vec_ulong_cmp(const void* a, const void* b)
+{
+ ulong av = *((ulong*)(a));
+ ulong bv = *((ulong*)(b));
+ return (av < bv ? -1 : (av > bv ? 1 : 0));
+}
+
+
+gr_ptr
+gr_sparse_vec_find_entry(gr_sparse_vec_t vec, slong col, gr_ctx_t ctx)
+{
+ slong sz = ctx->sizeof_elem;
+ ulong* bs = NULL;
+ if (col < 0 || col >= vec->length)
+ return NULL;
+ bs = bsearch(&col, vec->inds, vec->nnz, sizeof(slong), gr_sparse_vec_ulong_cmp);
+ if (bs == NULL)
+ return NULL;
+ return GR_ENTRY(vec->nzs, bs - vec->inds, sz);
+}
+
+int
+gr_sparse_vec_get_entry(gr_ptr res, gr_sparse_vec_t vec, slong col, gr_ctx_t ctx)
+{
+ slong sz = ctx->sizeof_elem;
+ ulong* bs = NULL;
+ if (col < 0 || col >= vec->length)
+ return GR_DOMAIN;
+ bs = bsearch(&col, vec->inds, vec->nnz, sizeof(slong), gr_sparse_vec_ulong_cmp);
+ if (bs == NULL)
+ return gr_zero(res, ctx);
+ return gr_set(res, GR_ENTRY(vec->nzs, bs - vec->inds, sz), ctx);
+}
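+
+/*
+ * Note: gr_sparse_vec_find_entry returns a borrowed pointer into vec->nzs
+ * (NULL for a structural zero or an out-of-range index), whereas
+ * gr_sparse_vec_get_entry always produces a value, writing an actual zero
+ * for structural zeros. Sketch (hypothetical names):
+ *
+ *     gr_ptr p = gr_sparse_vec_find_entry(v, 3, ctx);   // may be NULL
+ *     status |= gr_sparse_vec_get_entry(x, v, 3, ctx);  // x always written
+ */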
diff --git a/src/gr_sparse_vec/permute_inds.c b/src/gr_sparse_vec/permute_inds.c
new file mode 100644
index 0000000000..5915bf29f1
--- /dev/null
+++ b/src/gr_sparse_vec/permute_inds.c
@@ -0,0 +1,50 @@
+#include <stdlib.h>
+#include "gr_sparse_vec.h"
+
+typedef struct
+{
+ slong col;
+ slong i;
+}
+gr_sparse_vec_slong_sorter_t;
+
+static int gr_sparse_vec_slong_sort(const void* a, const void* b)
+{
+ slong ak = ((gr_sparse_vec_slong_sorter_t*)(a))->col;
+ slong bk = ((gr_sparse_vec_slong_sorter_t*)(b))->col;
+ return (ak < bk ? -1 : (ak > bk ? 1 : 0));
+}
+
+int
+gr_sparse_vec_permute_inds(gr_sparse_vec_t vec, const gr_sparse_vec_t src, slong * p, gr_ctx_t ctx)
+{
+ slong i,sz,nnz;
+ int status = GR_SUCCESS;
+ gr_sparse_vec_slong_sorter_t *si;
+ if (vec == src)
+ {
+ gr_sparse_vec_t temp;
+ gr_sparse_vec_init(temp, src->length, ctx);
+ GR_MUST_SUCCEED(gr_sparse_vec_set(temp, src, ctx));
+ status |= gr_sparse_vec_permute_inds(temp, src, p, ctx);
+ status |= gr_sparse_vec_set(vec, temp, ctx);
+ gr_sparse_vec_clear(temp, ctx);
+ return status;
+ }
+    sz = ctx->sizeof_elem;
+    nnz = src->nnz;
+    si = flint_malloc(nnz * sizeof(gr_sparse_vec_slong_sorter_t));
+    for (i = 0; i < nnz; i++)
+    {
+        si[i].col = p[src->inds[i]];
+        si[i].i = i;
+    }
+    qsort(si, nnz, sizeof(gr_sparse_vec_slong_sorter_t), gr_sparse_vec_slong_sort);
+    /* Make sure the destination can hold the permuted nonzeros */
+    gr_sparse_vec_fit_nnz(vec, nnz, ctx);
+    for (i = 0; i < nnz; i++)
+    {
+        vec->inds[i] = si[i].col;
+        status |= gr_set(GR_ENTRY(vec->nzs, i, sz), GR_ENTRY(src->nzs, si[i].i, sz), ctx);
+    }
+    vec->nnz = nnz;
+    flint_free(si);
+    return status;
+}
diff --git a/src/gr_sparse_vec/randtest.c b/src/gr_sparse_vec/randtest.c
new file mode 100644
index 0000000000..8a61458f7d
--- /dev/null
+++ b/src/gr_sparse_vec/randtest.c
@@ -0,0 +1,97 @@
+#include <stdlib.h>
+#include "gr_sparse_vec.h"
+
+static int
+slong_cmp(const void * a, const void * b)
+{
+    slong ax = *((slong *) a);
+    slong bx = *((slong *) b);
+    /* Compare explicitly rather than returning ax - bx, which can
+       overflow and is truncated to int */
+    return (ax < bx) ? -1 : (ax > bx);
+}
+
+
+int
+gr_sparse_vec_randtest(gr_sparse_vec_t vec, slong nnz, int replacement, flint_rand_t state, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong i, j, sz;
+ sz = ctx->sizeof_elem;
+
+ if (nnz < 0 || nnz > vec->length)
+ return GR_DOMAIN;
+
+ // Make space
+ gr_sparse_vec_fit_nnz(vec, nnz, ctx);
+
+ if (replacement)
+ {
+ // Randomly sample nnz columns with replacement, and then sort and prune duplicates
+ for (i = 0; i < nnz; ++i)
+ vec->inds[i] = n_randint(state, vec->length);
+ qsort(vec->inds, nnz, sizeof(slong), slong_cmp);
+
+ j = 0;
+ for (i = 0; i < nnz; ++i)
+ if (i == 0 || vec->inds[i] != vec->inds[i-1])
+ vec->inds[j++] = vec->inds[i];
+ vec->nnz = j;
+ }
+ else
+ {
+ // Randomly sample nnz columns without replacement, then sort
+ for (i = 0; i < vec->length; ++i)
+ {
+ j = i < nnz ? i : n_randint(state, i+1);
+ if (j < nnz) vec->inds[j] = i;
+ }
+ if (nnz < vec->length)
+ qsort(vec->inds, nnz, sizeof(slong), slong_cmp);
+ vec->nnz = nnz;
+ }
+
+ for (i = 0; i < vec->nnz; ++i)
+ status |= gr_randtest_not_zero(GR_ENTRY(vec->nzs, i, sz), state, ctx);
+
+ return status;
+}
+
+int
+gr_sparse_vec_randtest_prob(gr_sparse_vec_t vec, double prob, flint_rand_t state, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong i, sz;
+ sz = ctx->sizeof_elem;
+
+ if (prob < 0 || prob > 1)
+ return GR_DOMAIN;
+
+ // Handle corner cases
+ if (prob == 0)
+ {
+ gr_sparse_vec_zero(vec, ctx);
+ return GR_SUCCESS;
+ }
+ if (prob == 1)
+ {
+ status |= gr_sparse_vec_randtest(vec, vec->length, 0, state, ctx);
+ return status;
+ }
+
+ // Allocate space for expected number of nonzeroes, and expand as needed
+ gr_sparse_vec_fit_nnz(vec, prob * vec->length, ctx);
+
+ // TODO: for low probability, should be able to do this faster
+ vec->nnz = 0;
+ for (i = 0; i < vec->length; ++i)
+ {
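+        /* n_randint(state, 0) returns a uniformly random word; since UWORD_MAX
+           is about 2 * WORD_MAX, the comparison below keeps each index with
+           probability roughly prob. */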
+ if (n_randint(state, 0) < 2 * prob * WORD_MAX)
+ {
+ if (vec->nnz == vec->alloc)
+ gr_sparse_vec_fit_nnz(vec, vec->alloc * 2, ctx);
+ status |= gr_randtest_not_zero(GR_ENTRY(vec->nzs, vec->nnz, sz), state, ctx);
+ vec->inds[vec->nnz++] = i;
+ }
+ }
+
+ return status;
+}
diff --git a/src/gr_sparse_vec/set.c b/src/gr_sparse_vec/set.c
new file mode 100644
index 0000000000..9761492ef2
--- /dev/null
+++ b/src/gr_sparse_vec/set.c
@@ -0,0 +1,20 @@
+#include "gr_sparse_vec.h"
+
+int
+gr_sparse_vec_set(gr_sparse_vec_t dst, const gr_sparse_vec_t src, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ if (dst->length < src->length)
+ return GR_DOMAIN;
+
+ if (dst != src)
+ {
+ slong nnz = src->nnz;
+ gr_sparse_vec_fit_nnz(dst, nnz, ctx);
+ memcpy(dst->inds, src->inds, nnz*sizeof(slong));
+ status = _gr_vec_set(dst->nzs, src->nzs, nnz, ctx);
+ dst->nnz = nnz;
+ dst->length = src->length;
+ }
+ return status;
+}
diff --git a/src/gr_sparse_vec/set_entry.c b/src/gr_sparse_vec/set_entry.c
new file mode 100644
index 0000000000..b8e141e8c7
--- /dev/null
+++ b/src/gr_sparse_vec/set_entry.c
@@ -0,0 +1,61 @@
+#include <stdlib.h>
+#include <string.h>
+#include "gr_sparse_vec.h"
+
+static int gr_sparse_vec_ulong_cmp(const void* a, const void* b)
+{
+ ulong av = *((ulong*)(a));
+ ulong bv = *((ulong*)(b));
+ return (av < bv ? -1 : (av > bv ? 1 : 0));
+}
+
+int
+gr_sparse_vec_set_entry(gr_sparse_vec_t vec, slong col, gr_srcptr entry, gr_ctx_t ctx)
+{
+ slong i,j;
+ slong sz = ctx->sizeof_elem;
+ slong nnz = vec->nnz;
+ ulong* bs = NULL;
+ if (col < 0 || col >= vec->length)
+ return GR_DOMAIN;
+ bs = bsearch(&col, vec->inds, vec->nnz, sizeof(slong), gr_sparse_vec_ulong_cmp);
+ if (bs != NULL)
+ {
+ i = bs - vec->inds;
+ if (gr_is_zero(entry, ctx) == T_TRUE)
+ {
+            // Shift everything above i down, stopping at nnz - 1 so we never
+            // touch the slot at index nnz, which may not be allocated
+            memmove(vec->inds + i, vec->inds + i + 1, (vec->nnz - i - 1)*sizeof(slong));
+            for (j = i; j < vec->nnz - 1; j++)
+            {
+                gr_swap(GR_ENTRY(vec->nzs, j, sz), GR_ENTRY(vec->nzs, j + 1, sz), ctx);
+            }
+            --vec->nnz;
+ return GR_SUCCESS;
+ }
+ }
+ else
+ {
+ if (gr_is_zero(entry, ctx) == T_TRUE)
+ {
+ // Already 0
+ return GR_SUCCESS;
+ }
+ // Make room for new element
+ gr_sparse_vec_fit_nnz(vec, vec->nnz+1, ctx);
+
+ // Find location to put new element
+ for (i = 0; i < nnz; i++)
+ if (col < vec->inds[i])
+ break;
+
+ // Shift everything above i up
+ memmove(vec->inds + i + 1, vec->inds + i, (vec->nnz - i)*sizeof(slong));
+ for (j = vec->nnz; j > i; j--)
+ {
+ gr_swap(GR_ENTRY(vec->nzs, j-1, sz), GR_ENTRY(vec->nzs, j, sz), ctx);
+ }
+ vec->inds[i] = col;
+ ++vec->nnz;
+ }
+ return gr_set(GR_ENTRY(vec->nzs, i, sz), entry, ctx);
+}
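+
+/*
+ * Note: each call costs O(nnz) in the worst case (binary search plus a shift
+ * of the tail), so building a vector one entry at a time is quadratic;
+ * gr_sparse_vec_from_entries is preferable for bulk construction.
+ */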
diff --git a/src/gr_sparse_vec/set_length.c b/src/gr_sparse_vec/set_length.c
new file mode 100644
index 0000000000..448fb63030
--- /dev/null
+++ b/src/gr_sparse_vec/set_length.c
@@ -0,0 +1,15 @@
+#include "gr_sparse_vec.h"
+
+void
+gr_sparse_vec_set_length(gr_sparse_vec_t vec, slong len, gr_ctx_t ctx)
+{
+ vec->length = len;
+ /* Scan backward through the nonzeros to discard the ones off the end */
+ /* Note that we don't actually free anything; we just mark the nonzeros as unused */
+    slong i = vec->nnz - 1;
+    while (i >= 0 && vec->inds[i] >= len)
+        i--;
+ vec->nnz = i+1;
+}
diff --git a/src/gr_sparse_vec/shrink_to_nnz.c b/src/gr_sparse_vec/shrink_to_nnz.c
new file mode 100644
index 0000000000..10a6de3213
--- /dev/null
+++ b/src/gr_sparse_vec/shrink_to_nnz.c
@@ -0,0 +1,15 @@
+#include "gr_sparse_vec.h"
+
+void
+gr_sparse_vec_shrink_to_nnz(gr_sparse_vec_t vec, gr_ctx_t ctx)
+{
+ slong nnz = vec->nnz;
+ slong sz = ctx->sizeof_elem;
+ if (vec->alloc > nnz)
+ {
+ vec->inds = flint_realloc(vec->inds, nnz * sizeof(ulong));
+ _gr_vec_clear(GR_ENTRY(vec->nzs, nnz, sz), vec->alloc - nnz, ctx);
+ vec->nzs = flint_realloc(vec->nzs, nnz * sz);
+ vec->alloc = nnz;
+ }
+}
diff --git a/src/gr_sparse_vec/slice.c b/src/gr_sparse_vec/slice.c
new file mode 100644
index 0000000000..6b9dcfe6ec
--- /dev/null
+++ b/src/gr_sparse_vec/slice.c
@@ -0,0 +1,52 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "gr_sparse_vec.h"
+
+int
+gr_sparse_vec_slice(gr_sparse_vec_t dst, const gr_sparse_vec_t src, slong col_start, slong col_end, gr_ctx_t ctx)
+{
+    slong sz, i, nnz, new_nnz, i_start, i_end;
+    int status;
+ nnz = src->nnz;
+ sz = ctx->sizeof_elem;
+    i_start = nnz;  /* if no index reaches col_start, the slice is empty */
+    i_end = nnz;
+ status = GR_SUCCESS;
+ for (i = 0; i < nnz; i++)
+ {
+ /* If we find a valid column, start the interval */
+ if (src->inds[i] >= col_start)
+ {
+ i_start = i;
+ break;
+ }
+ }
+ for (; i <= nnz; i++)
+ {
+        /* Close the interval at the first column index >= col_end, or when we run off the end */
+ if (i == nnz || src->inds[i] >= col_end)
+ {
+ i_end = i;
+ break;
+ }
+ }
+ /* We are messing with the internals of dst; this should be ok even if it is also src */
+ new_nnz = i_end - i_start;
+ dst->length = col_end - col_start;
+ gr_sparse_vec_fit_nnz(dst, new_nnz, ctx);
+ for (i = i_start; i < i_end; i++)
+ {
+        dst->inds[i - i_start] = src->inds[i] - col_start;
+ status |= gr_set(GR_ENTRY(dst->nzs, i-i_start, sz), GR_ENTRY(src->nzs, i, sz), ctx);
+ }
+ dst->nnz = new_nnz;
+ return status;
+}
diff --git a/src/gr_sparse_vec/test/main.c b/src/gr_sparse_vec/test/main.c
new file mode 100644
index 0000000000..1f5722d5d9
--- /dev/null
+++ b/src/gr_sparse_vec/test/main.c
@@ -0,0 +1,27 @@
+#include <string.h>
+#include <stdlib.h>
+
+/* Include functions *********************************************************/
+
+#include "t-arith.c"
+#include "t-conversion.c"
+#include "t-dot.c"
+#include "t-init.c"
+#include "t-randtest.c"
+#include "t-sum-prod.c"
+
+/* Array of test functions ***************************************************/
+
+test_struct tests[] =
+{
+ TEST_FUNCTION(gr_sparse_vec_init),
+ TEST_FUNCTION(gr_sparse_vec_conversion),
+ TEST_FUNCTION(gr_sparse_vec_randtest),
+ TEST_FUNCTION(gr_sparse_vec_arith),
+ TEST_FUNCTION(gr_sparse_vec_dot),
+ TEST_FUNCTION(gr_sparse_vec_sum_prod),
+};
+
+/* main function *************************************************************/
+
+TEST_MAIN(tests)
diff --git a/src/gr_sparse_vec/test/t-arith.c b/src/gr_sparse_vec/test/t-arith.c
new file mode 100644
index 0000000000..51e9cef9b4
--- /dev/null
+++ b/src/gr_sparse_vec/test/t-arith.c
@@ -0,0 +1,441 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+    (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "test_helpers.h"
+#include "gr_sparse_vec.h"
+#include "fmpz.h"
+#include "fmpq.h"
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int
+test_add_sub_mul(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i, k, l, m;
+ slong N = 20;
+ slong n_tests = 20;
+ int status = GR_SUCCESS;
+ gr_sparse_vec_t vec, vec2, vec_other;
+ gr_ptr dvec, dvec2, dvec3, dvec_other;
+ gr_ptr temp;
+ gr_ctx_t ctx_other;
+ truth_t eq;
+
+ gr_ctx_init_fmpz(ctx_other);
+ GR_TMP_INIT(temp, ctx);
+ GR_TMP_INIT_VEC(dvec, N, ctx);
+ GR_TMP_INIT_VEC(dvec2, N, ctx);
+ GR_TMP_INIT_VEC(dvec3, N, ctx);
+ GR_TMP_INIT_VEC(dvec_other, N, ctx_other);
+ gr_sparse_vec_init(vec, N, ctx);
+ gr_sparse_vec_init(vec2, N, ctx);
+ gr_sparse_vec_init(vec_other, N, ctx_other);
+
+ for (i = 0; i < 18 * n_tests; i++)
+ {
+ status = GR_SUCCESS;
+ k = i % 2; // Into sparse or dense
+ l = (i / 2) % 3; // Add, subtract, or mul
+ m = (i / 6) % 3; // For into sparse, also check other ctx usage
+ if (k > 0 && m > 0) continue;
+
+ status |= gr_sparse_vec_randtest(vec, 10, 0, state, ctx);
+ if (k == 0)
+ {
+ status |= gr_vec_set_sparse_vec(dvec, vec, ctx);
+ if (m == 0)
+ {
+ status |= gr_sparse_vec_randtest(vec2, 10, 0, state, ctx);
+ status |= gr_vec_set_sparse_vec(dvec2, vec2, ctx);
+ if (l == 0)
+ {
+ status |= gr_sparse_vec_add(vec, vec, vec2, ctx);
+ status |= _gr_vec_add(dvec, dvec, dvec2, N, ctx);
+ }
+ else if (l == 1)
+ {
+ status |= gr_sparse_vec_sub(vec, vec, vec2, ctx);
+ status |= _gr_vec_sub(dvec, dvec, dvec2, N, ctx);
+ }
+ else
+ {
+ status |= gr_sparse_vec_mul(vec, vec, vec2, ctx);
+ status |= _gr_vec_mul(dvec, dvec, dvec2, N, ctx);
+ }
+ }
+ else
+ {
+ status |= gr_sparse_vec_randtest(vec_other, 10, 0, state, ctx_other);
+ status |= gr_vec_set_sparse_vec(dvec_other, vec_other, ctx_other);
+ if (m == 1)
+ {
+ if (l == 0)
+ {
+ status |= gr_sparse_vec_add_other(vec, vec, vec_other, ctx_other, ctx);
+ status |= _gr_vec_add_other(dvec, dvec, dvec_other, ctx_other, N, ctx);
+ }
+ else if (l == 1)
+ {
+ status |= gr_sparse_vec_sub_other(vec, vec, vec_other, ctx_other, ctx);
+ status |= _gr_vec_sub_other(dvec, dvec, dvec_other, ctx_other, N, ctx);
+ }
+ else
+ {
+ status |= gr_sparse_vec_mul_other(vec, vec, vec_other, ctx_other, ctx);
+ status |= _gr_vec_mul_other(dvec, dvec, dvec_other, ctx_other, N, ctx);
+ }
+ }
+ else
+ {
+ if (l == 0)
+ {
+ status |= gr_other_add_sparse_vec(vec, vec_other, ctx_other, vec, ctx);
+ status |= _gr_other_add_vec(dvec, dvec_other, ctx_other, dvec, N, ctx);
+ }
+ else if (l == 1)
+ {
+ status |= gr_other_sub_sparse_vec(vec, vec_other, ctx_other, vec, ctx);
+ status |= _gr_other_sub_vec(dvec, dvec_other, ctx_other, dvec, N, ctx);
+ }
+ else
+ {
+ status |= gr_other_mul_sparse_vec(vec, vec_other, ctx_other, vec, ctx);
+ status |= _gr_other_mul_vec(dvec, dvec_other, ctx_other, dvec, N, ctx);
+ }
+ }
+ }
+ }
+ else
+ {
+ status |= _gr_vec_randtest(dvec, state, N, ctx);
+ status |= _gr_vec_set(dvec2, dvec, N, ctx);
+ status |= gr_vec_set_sparse_vec(dvec3, vec, ctx);
+
+ if (l == 0)
+ {
+ status |= gr_vec_add_sparse_vec(dvec, dvec, vec, ctx);
+ status |= _gr_vec_add(dvec2, dvec2, dvec3, N, ctx);
+ }
+ else if (l == 1)
+ {
+ status |= gr_vec_sub_sparse_vec(dvec, dvec, vec, ctx);
+ status |= _gr_vec_sub(dvec2, dvec2, dvec3, N, ctx);
+ }
+ else
+ {
+ continue; // TODO
+ }
+ }
+
+ if (status == GR_UNABLE)
+ continue;
+
+ if (k == 0)
+ status |= gr_vec_set_sparse_vec(dvec2, vec, ctx);
+ eq = _gr_vec_equal(dvec, dvec2, N, ctx);
+ if (eq == T_FALSE || status != GR_SUCCESS)
+ {
+ flint_printf(
+ "\ni = %d, k = %d, l = %d, m = %d, equal = %d, status = %d\n",
+ i, k, l, m, eq, status
+ );
+ gr_ctx_println(ctx);
+ flint_printf("dvec = "); _gr_vec_print(dvec, N, ctx); flint_printf("\n");
+ flint_printf("dvec2 = "); _gr_vec_print(dvec2, N, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ gr_sparse_vec_clear(vec, ctx);
+ gr_sparse_vec_clear(vec2, ctx);
+ gr_sparse_vec_clear(vec_other, ctx_other);
+ GR_TMP_CLEAR_VEC(dvec, N, ctx);
+ GR_TMP_CLEAR_VEC(dvec2, N, ctx);
+ GR_TMP_CLEAR_VEC(dvec3, N, ctx);
+ GR_TMP_CLEAR_VEC(dvec_other, N, ctx_other);
+ GR_TMP_CLEAR(temp, ctx);
+ gr_ctx_clear(ctx_other);
+ return status;
+}
+
+#define TEST_ACCUM_MUL_SCALAR(STATUS, K, L, TYPE, VEC, VEC2, DVEC, DVEC2, DVEC3, C, CTX) { \
+ if (K == 0) \
+ { \
+ if (L == 0) \
+ { \
+ STATUS |= gr_sparse_vec_addmul_##TYPE(VEC, VEC2, C, CTX); \
+ STATUS |= _gr_vec_addmul_##TYPE(DVEC, DVEC2, N, C, CTX); \
+ } \
+ else \
+ { \
+ STATUS |= gr_sparse_vec_submul_##TYPE(VEC, VEC2, C, CTX); \
+ STATUS |= _gr_vec_submul_##TYPE(DVEC, DVEC2, N, C, CTX); \
+ } \
+ } \
+ else \
+ { \
+ if (L == 0) \
+ { \
+            STATUS |= gr_vec_addmul_sparse_vec_##TYPE(DVEC, VEC, C, CTX); \
+            STATUS |= _gr_vec_addmul_##TYPE(DVEC2, DVEC3, N, C, CTX); \
+ } \
+ else \
+ { \
+            STATUS |= gr_vec_submul_sparse_vec_##TYPE(DVEC, VEC, C, CTX); \
+            STATUS |= _gr_vec_submul_##TYPE(DVEC2, DVEC3, N, C, CTX); \
+ } \
+ } \
+}
+
+int test_accum_mul_scalar(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i, j, k, l, c;
+ slong N = 20;
+ slong n_tests = 20;
+ int status = GR_SUCCESS;
+ gr_sparse_vec_t vec, vec2, vec_other;
+ gr_ptr dvec, dvec2, dvec3;
+ gr_ptr temp;
+ gr_ctx_t ctx_other;
+ truth_t eq;
+
+ gr_ctx_init_fmpz(ctx_other);
+ GR_TMP_INIT(temp, ctx);
+ GR_TMP_INIT_VEC(dvec, N, ctx);
+ GR_TMP_INIT_VEC(dvec2, N, ctx);
+ GR_TMP_INIT_VEC(dvec3, N, ctx);
+ gr_sparse_vec_init(vec, N, ctx);
+ gr_sparse_vec_init(vec2, N, ctx);
+ gr_sparse_vec_init(vec_other, N, ctx_other);
+
+ for (i = 0; i < 8 * n_tests; i++)
+ {
+ status = GR_SUCCESS;
+ //flint_printf("%d\n", i);
+        j = i % 2; // Scalar from ctx or si
+ k = (i / 2) % 2; // Into sparse or dense
+ l = (i / 4) % 2; // Add or subtract
+
+ status |= gr_sparse_vec_randtest(vec, 10, 0, state, ctx);
+ if (k == 0)
+ {
+ status |= gr_vec_set_sparse_vec(dvec, vec, ctx);
+ status |= gr_sparse_vec_randtest(vec2, 10, 0, state, ctx);
+ status |= gr_vec_set_sparse_vec(dvec2, vec2, ctx);
+ }
+ else
+ {
+ status |= _gr_vec_randtest(dvec, state, N, ctx);
+ status |= _gr_vec_set(dvec2, dvec, N, ctx);
+ status |= gr_vec_set_sparse_vec(dvec3, vec, ctx);
+ }
+        if (j == 0)
+        {
+            status |= gr_randtest_not_zero(temp, state, ctx);
+            TEST_ACCUM_MUL_SCALAR(status, k, l, scalar, vec, vec2, dvec, dvec2, dvec3, temp, ctx)
+        }
+        else
+        {
+            c = n_randint(state, 0);
+            TEST_ACCUM_MUL_SCALAR(status, k, l, scalar_si, vec, vec2, dvec, dvec2, dvec3, c, ctx)
+        }
+        if (status == GR_UNABLE)
+        {
+            status = GR_SUCCESS;
+            continue;
+        }
+
+ if (k == 0)
+ status |= gr_vec_set_sparse_vec(dvec2, vec, ctx);
+ eq = _gr_vec_equal(dvec, dvec2, N, ctx);
+ if (eq == T_FALSE || status != GR_SUCCESS)
+ {
+ flint_printf(
+ "\ni = %d, j = %d, k = %d, l = %d, equal = %d, status = %d\n",
+ i, j, k, l, eq, status
+ );
+ gr_ctx_println(ctx);
+ flint_printf("dvec = "); _gr_vec_print(dvec, N, ctx); flint_printf("\n");
+ flint_printf("dvec2 = "); _gr_vec_print(dvec2, N, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ gr_sparse_vec_clear(vec, ctx);
+ gr_sparse_vec_clear(vec2, ctx);
+ gr_sparse_vec_clear(vec_other, ctx_other);
+ GR_TMP_CLEAR_VEC(dvec, N, ctx);
+ GR_TMP_CLEAR_VEC(dvec2, N, ctx);
+ GR_TMP_CLEAR_VEC(dvec3, N, ctx);
+ GR_TMP_CLEAR(temp, ctx);
+ gr_ctx_clear(ctx_other);
+ return status;
+}
+
+
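+// A sketch of the TEST_MUL_SCALAR conventions: K = 0 multiplies VEC by
+// the scalar C, K = 1 divides by C, and K = 2 multiplies and then applies
+// divexact, which should recover the original; DVEC/DVEC2 carry the dense
+// reference. N is read from the calling scope.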
+#define TEST_MUL_SCALAR(STATUS, K, TYPE, VEC, VEC2, DVEC, DVEC2, C, CTX) { \
+ if (K == 1) \
+ { \
+ STATUS |= gr_sparse_vec_div_##TYPE(VEC2, VEC, C, CTX); \
+ STATUS |= _gr_vec_div_##TYPE(DVEC2, DVEC, N, C, CTX); \
+ } \
+ else \
+ { \
+ STATUS |= gr_sparse_vec_mul_##TYPE(VEC2, VEC, C, CTX); \
+ STATUS |= _gr_vec_mul_##TYPE(DVEC2, DVEC, N, C, CTX); \
+ if (K == 2) \
+ { \
+ STATUS |= gr_sparse_vec_divexact_##TYPE(VEC2, VEC2, C, CTX); \
+ STATUS |= _gr_vec_divexact_##TYPE(DVEC2, DVEC2, N, C, CTX); \
+ } \
+ } \
+}
+
+int test_mul_div_scalar(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i, j, k, c;
+ ulong uc;
+ slong N = 100;
+ slong n_tests = 20;
+ int status = GR_SUCCESS;
+ gr_sparse_vec_t vec, vec2;
+ gr_ptr dvec, dvec2;
+ gr_ptr temp;
+ fmpz_t zc;
+ fmpq_t qc;
+ truth_t eq;
+
+ GR_TMP_INIT(temp, ctx);
+ GR_TMP_INIT_VEC(dvec, N, ctx);
+ GR_TMP_INIT_VEC(dvec2, N, ctx);
+ fmpz_init(zc);
+ fmpq_init(qc);
+ gr_sparse_vec_init(vec, N, ctx);
+ gr_sparse_vec_init(vec2, N, ctx);
+
+ for (i = 0; i < 18 * n_tests; i++)
+ {
+ j = i % 6; // Which type of scalar
+ k = (i / 6) % 3; // Mul, div, or mul + divexact
+ if ((j == 4 || k == 1) && gr_ctx_is_field(ctx) != T_TRUE)
+ continue;
+ if (k == 2 && (gr_ctx_is_exact(ctx) != T_TRUE || gr_ctx_is_integral_domain(ctx) != T_TRUE))
+ continue;
+ status |= gr_sparse_vec_randtest(vec, 10, 0, state, ctx);
+ status |= gr_vec_set_sparse_vec(dvec, vec, ctx);
+
+ switch(j)
+ {
+ case 0:
+ //flint_printf("Testing scalar\n");
+ status |= gr_randtest_not_zero(temp, state, ctx);
+ TEST_MUL_SCALAR(status, k, scalar, vec, vec2, dvec, dvec2, temp, ctx)
+ break;
+ case 1:
+ //flint_printf("Testing scalar_si\n");
+ c = n_randint(state, 0);
+ TEST_MUL_SCALAR(status, k, scalar_si, vec, vec2, dvec, dvec2, c, ctx)
+ break;
+ case 2:
+ //flint_printf("Testing scalar_ui\n");
+ uc = n_randint(state, 0);
+ TEST_MUL_SCALAR(status, k, scalar_ui, vec, vec2, dvec, dvec2, uc, ctx)
+ break;
+ case 3:
+ //flint_printf("Testing scalar_fmpz\n");
+ fmpz_randtest_not_zero(zc, state, 32);
+ TEST_MUL_SCALAR(status, k, scalar_fmpz, vec, vec2, dvec, dvec2, zc, ctx)
+ break;
+ case 4:
+ //flint_printf("Testing scalar_fmpq\n");
+ fmpq_randtest_not_zero(qc, state, 32);
+ TEST_MUL_SCALAR(status, k, scalar_fmpq, vec, vec2, dvec, dvec2, qc, ctx)
+ break;
+ case 5:
+ //flint_printf("Testing scalar_2exp_si\n");
+ c = n_randint(state, 32) + 1;
+ if (k == 1)
+ {
+ status |= gr_sparse_vec_mul_scalar_2exp_si(vec2, vec, -c, ctx);
+ status |= _gr_vec_mul_scalar_2exp_si(dvec2, dvec, N, -c, ctx);
+ }
+ else
+ {
+ status |= gr_sparse_vec_mul_scalar_2exp_si(vec2, vec, c, ctx);
+ status |= _gr_vec_mul_scalar_2exp_si(dvec2, dvec, N, c, ctx);
+ if (k == 2)
+ {
+ status |= gr_sparse_vec_mul_scalar_2exp_si(vec2, vec2, -c, ctx);
+ status |= _gr_vec_mul_scalar_2exp_si(dvec2, dvec2, N, -c, ctx);
+ }
+ }
+ break;
+ }
+ // If any operation not allowed, just skip test
+ if (status == GR_UNABLE || status == GR_DOMAIN) // TODO: FIXME
+ {
+ status = GR_SUCCESS;
+ continue;
+ }
+ //gr_sparse_vec_print_nz(vec, ctx); flint_printf("\n");
+ //gr_sparse_vec_print_nz(vec2, ctx); flint_printf("\n");
+
+ status |= gr_vec_set_sparse_vec(dvec, vec2, ctx);
+ eq = _gr_vec_equal(dvec, dvec2, N, ctx);
+ if (eq == T_FALSE || status != GR_SUCCESS)
+ {
+ flint_printf(
+ "j = %d, k = %d, equal = %d, status = %d\n",
+ j, k, eq, status
+ );
+ gr_ctx_println(ctx);
+ _gr_vec_print(dvec, N, ctx); flint_printf("\n");
+ _gr_vec_print(dvec2, N, ctx); flint_printf("\n");
+ status |= gr_sparse_vec_set_vec(vec, dvec2, N, ctx);
+ gr_sparse_vec_print_nz(vec, ctx); flint_printf("\n");
+ gr_sparse_vec_print_nz(vec2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ gr_sparse_vec_clear(vec, ctx);
+ gr_sparse_vec_clear(vec2, ctx);
+ GR_TMP_CLEAR_VEC(dvec, N, ctx);
+ GR_TMP_CLEAR_VEC(dvec2, N, ctx);
+ fmpz_clear(zc);
+ fmpq_clear(qc);
+ GR_TMP_CLEAR(temp, ctx);
+ return status;
+}
+
+TEST_FUNCTION_START(gr_sparse_vec_arith, state)
+{
+ slong i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 16; ++i)
+ {
+ while (1)
+ {
+ //gr_ctx_init_fmpz(ctx);
+ gr_ctx_init_random(ctx, state);
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ CHECK_TEST(test_add_sub_mul(state, ctx), "Addition, subtraction, and multiplication into sparse and dense");
+ CHECK_TEST(test_accum_mul_scalar(state, ctx), "Scalar addmul and submul into sparse and dense");
+ CHECK_TEST(test_mul_div_scalar(state, ctx), "Scalar multiplication and division");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
diff --git a/src/gr_sparse_vec/test/t-conversion.c b/src/gr_sparse_vec/test/t-conversion.c
new file mode 100644
index 0000000000..3f69ff2661
--- /dev/null
+++ b/src/gr_sparse_vec/test/t-conversion.c
@@ -0,0 +1,99 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "test_helpers.h"
+#include "gr_sparse_vec.h"
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int
+test_conversion(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i;
+ slong N = 100;
+ slong n_tests = 20;
+ slong sz = ctx->sizeof_elem;
+ int status = GR_SUCCESS;
+ gr_sparse_vec_t vec, vec2;
+ gr_vec_t dvec, dvec2;
+
+ gr_vec_init(dvec, N, ctx);
+ gr_vec_init(dvec2, N, ctx);
+ gr_sparse_vec_init(vec, N, ctx);
+ gr_sparse_vec_init(vec2, N, ctx);
+
+ //flint_printf("Testing copy\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_sparse_vec_randtest(vec, 10, 0, state, ctx);
+ status |= gr_sparse_vec_set(vec2, vec, ctx);
+ if (T_FALSE == gr_sparse_vec_equal(vec, vec2, ctx))
+ {
+ gr_sparse_vec_print_nz(vec, ctx); flint_printf("\n");
+ gr_sparse_vec_print_nz(vec2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ //flint_printf("Testing from/to dense vec\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= _gr_vec_randtest(GR_VEC_ENTRY(dvec, 0, sz), state, N, ctx);
+ status |= gr_sparse_vec_set_vec(vec, GR_VEC_ENTRY(dvec, 0, sz), N, ctx);
+ status |= gr_vec_set_sparse_vec(GR_VEC_ENTRY(dvec2, 0, sz), vec, ctx);
+ if (T_FALSE == _gr_vec_equal(GR_VEC_ENTRY(dvec, 0, sz), GR_VEC_ENTRY(dvec2, 0, sz), N, ctx))
+ {
+ gr_vec_print(dvec, ctx); flint_printf("\n");
+ gr_vec_print(dvec2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+
+ //flint_printf("Testing from/to sparse vec\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_sparse_vec_randtest(vec, 10, 0, state, ctx);
+ status |= gr_vec_set_sparse_vec(GR_VEC_ENTRY(dvec, 0, sz), vec, ctx);
+ status |= gr_sparse_vec_set_vec(vec2, GR_VEC_ENTRY(dvec, 0, sz), N, ctx);
+ if (T_FALSE == gr_sparse_vec_equal(vec, vec2, ctx))
+ {
+ gr_sparse_vec_print_nz(vec, ctx); flint_printf("\n");
+ gr_sparse_vec_print_nz(vec2, ctx); flint_printf("\n");
+ return GR_TEST_FAIL;
+ }
+ }
+ gr_sparse_vec_clear(vec, ctx);
+ gr_sparse_vec_clear(vec2, ctx);
+ gr_vec_clear(dvec, ctx);
+ gr_vec_clear(dvec2, ctx);
+ return status;
+}
+
+TEST_FUNCTION_START(gr_sparse_vec_conversion, state)
+{
+ slong i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 16; ++i)
+ {
+ while (1)
+ {
+ gr_ctx_init_random(ctx, state);
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ //gr_ctx_println(ctx);
+ CHECK_TEST(test_conversion(state, ctx), "Conversion from and to dense");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
diff --git a/src/gr_sparse_vec/test/t-dot.c b/src/gr_sparse_vec/test/t-dot.c
new file mode 100644
index 0000000000..de6cb6783f
--- /dev/null
+++ b/src/gr_sparse_vec/test/t-dot.c
@@ -0,0 +1,117 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "test_helpers.h"
+#include "gr_sparse_vec.h"
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int test_dot(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i;
+ slong N = 30;
+ slong n_tests = 20;
+ int status = GR_SUCCESS;
+ gr_sparse_vec_t vec, vec2;
+ gr_ptr dvec, dvec2;
+ gr_ptr dot, dot2;
+ GR_TMP_INIT2(dot, dot2, ctx);
+
+ GR_TMP_INIT_VEC(dvec, N, ctx);
+ GR_TMP_INIT_VEC(dvec2, N, ctx);
+ gr_sparse_vec_init(vec, N, ctx);
+ gr_sparse_vec_init(vec2, N, ctx);
+
+ //flint_printf("Testing copy\n");
+ for (i = 0; i < 2*n_tests; i++)
+ {
+ // Give a random initial value to dot
+ status |= gr_randtest(dot, state, ctx);
+ status |= gr_set(dot2, dot, ctx);
+
+ // Get two random sparse vectors
+ status |= gr_sparse_vec_randtest(vec, 10, 0, state, ctx);
+ status |= gr_sparse_vec_randtest(vec2, 10, 0, state, ctx);
+ status |= gr_vec_set_sparse_vec(dvec, vec, ctx);
+ status |= gr_vec_set_sparse_vec(dvec2, vec2, ctx);
+
+ // Compute sparse dot product and check against dense one
+ status |= gr_sparse_vec_dot(dot, dot, i % 2, vec, vec2, ctx);
+ status |= _gr_vec_dot(dot2, dot2, i % 2, dvec, dvec2, N, ctx);
+ if (gr_equal(dot, dot2, ctx) == T_FALSE)
+ {
+ gr_ctx_println(ctx);
+ flint_printf("vec = "); gr_sparse_vec_print_nz(vec, ctx); flint_printf("\n");
+ flint_printf("vec2 = "); gr_sparse_vec_print_nz(vec2, ctx); flint_printf("\n");
+ flint_printf("dvec = "); _gr_vec_print(dvec, N, ctx); flint_printf("\n");
+ flint_printf("dvec2 = "); _gr_vec_print(dvec2, N, ctx); flint_printf("\n");
+ flint_printf("dot = "); gr_println(dot, ctx);
+ flint_printf("dot2 = "); gr_println(dot2, ctx);
+ return GR_TEST_FAIL;
+ }
+ }
+
+ //flint_printf("Testing from/to dense vec\n");
+ for (i = 0; i < 2*n_tests; i++)
+ {
+ // Give a random initial value to dot
+ status |= gr_randtest(dot, state, ctx);
+ status |= gr_set(dot2, dot, ctx);
+
+ // Get random sparse and dense vectors
+ status |= gr_sparse_vec_randtest(vec, 10, 0, state, ctx);
+ status |= _gr_vec_randtest(dvec2, state, N, ctx);
+ status |= gr_vec_set_sparse_vec(dvec, vec, ctx);
+
+ // Compute sparse-dense dot product and check against dense-dense one
+ status |= gr_sparse_vec_dot_vec(dot, dot, i % 2, vec, dvec2, ctx);
+ status |= _gr_vec_dot(dot2, dot2, i % 2, dvec, dvec2, N, ctx);
+
+ if (gr_equal(dot, dot2, ctx) == T_FALSE)
+ {
+ gr_ctx_println(ctx);
+ flint_printf("vec = "); gr_sparse_vec_print_nz(vec, ctx); flint_printf("\n");
+ flint_printf("vec2 = "); _gr_vec_print(dvec2, N, ctx); flint_printf("\n");
+ flint_printf("dot = "); gr_println(dot, ctx);
+ flint_printf("dot2 = "); gr_println(dot2, ctx);
+ return GR_TEST_FAIL;
+ }
+ }
+
+ gr_sparse_vec_clear(vec, ctx);
+ gr_sparse_vec_clear(vec2, ctx);
+ GR_TMP_CLEAR_VEC(dvec, N, ctx);
+ GR_TMP_CLEAR_VEC(dvec2, N, ctx);
+ GR_TMP_CLEAR2(dot, dot2, ctx);
+ return status;
+}
+
+TEST_FUNCTION_START(gr_sparse_vec_dot, state)
+{
+ slong i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 16; ++i)
+ {
+ while (1)
+ {
+ gr_ctx_init_fmpz(ctx);
+ //gr_ctx_init_random(ctx, state);
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ //gr_ctx_println(ctx);
+ CHECK_TEST(test_dot(state, ctx), "Dot product");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
diff --git a/src/gr_sparse_vec/test/t-init.c b/src/gr_sparse_vec/test/t-init.c
new file mode 100644
index 0000000000..a9dd16b98c
--- /dev/null
+++ b/src/gr_sparse_vec/test/t-init.c
@@ -0,0 +1,179 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "test_helpers.h"
+#include "gr_sparse_vec.h"
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int test_init(gr_ctx_t ctx)
+{
+ gr_sparse_vec_t vec;
+ gr_sparse_vec_init(vec, 5, ctx);
+ if (vec->length != 5 || vec->alloc != 0 || vec->nnz != 0 || vec->inds != NULL || vec->nzs != NULL)
+ return GR_TEST_FAIL;
+ gr_sparse_vec_clear(vec, ctx);
+ return GR_SUCCESS;
+}
+
+int
+test_init_from_entries_canonical(flint_rand_t state, gr_ctx_t ctx)
+{
+ // TODO: randomize length and nonzero cols
+ slong i;
+ gr_sparse_vec_t vec;
+ gr_ptr entries;
+ int status = GR_SUCCESS;
+ truth_t eq;
+ slong sz = ctx->sizeof_elem;
+ slong N = 5;
+ slong len = 10;
+ ulong inds[5] = {0, 2, 3, 6, 9};
+
+ //flint_printf("Running init test\n");
+ GR_TMP_INIT_VEC(entries, N, ctx);
+ for (i = 0; i < N; ++i)
+ status |= gr_randtest_not_zero(GR_ENTRY(entries, i, sz), state, ctx);
+ if (status != GR_SUCCESS)
+ {
+ flint_printf("Failed to make random numbers!\n");
+ return GR_TEST_FAIL; // Not my fault!
+ }
+
+ gr_sparse_vec_init(vec, len, ctx);
+    status |= gr_sparse_vec_from_entries(vec, inds, entries, N, T_TRUE, ctx);
+
+ // Check parameters
+ if (status != GR_SUCCESS || vec->length != len || vec->alloc != 5 || vec->nnz != 5)
+ {
+ flint_printf("Bad params! %ld %ld %ld\n", vec->length, vec->alloc, vec->nnz);
+ return GR_TEST_FAIL;
+ }
+
+ // Check indices and entries
+ for (i = 0; i < N; ++i)
+ {
+ if (*gr_sparse_vec_ind_ptr(vec, i, ctx) != inds[i])
+ {
+ flint_printf("Bad indices!\n");
+ return GR_TEST_FAIL;
+ }
+ eq = gr_equal(gr_sparse_vec_entry_ptr(vec, i, ctx), GR_ENTRY(entries, i, sz), ctx);
+ if (eq == T_FALSE)
+ {
+ flint_printf("Bad elements!\n");
+ return GR_TEST_FAIL;
+ }
+ }
+ gr_sparse_vec_clear(vec, ctx);
+ GR_TMP_CLEAR_VEC(entries, N, ctx);
+ return status;
+}
+
+int
+test_init_from_entries_internal(ulong *inds, gr_srcptr entries, slong len, slong num, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong sz = ctx->sizeof_elem;
+ slong i,j;
+ gr_sparse_vec_t vec;
+ gr_ptr temp, temp2, temp3;
+
+ GR_TMP_INIT2(temp, temp2, ctx);
+ gr_sparse_vec_init(vec, len, ctx);
+ //flint_printf("entries: "); status |= _gr_vec_print(entries, num, ctx); flint_printf("\n");
+ status |= gr_sparse_vec_from_entries(vec, inds, entries, num, T_FALSE, ctx);
+ //flint_printf("vec: "); status |= gr_sparse_vec_print_nz(vec, ctx); flint_printf("\n");
+ if (status != GR_SUCCESS)
+ return GR_TEST_FAIL;
+
+ // Check every entry (including the zeroes)
+ for (i = 0; i < len; i++)
+ {
+ // Compute the expected value of the entry
+ status |= gr_zero(temp, ctx);
+ for (j = 0; j < num; j++)
+ if (inds[j] == i)
+ status |= gr_add(temp, temp, GR_ENTRY(entries, j, sz), ctx);
+
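+        // get_entry copies the value at index i (zero if absent), while find_entry returns a pointer into vec or NULL; both must agree with the expected sum in temp.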
+ status |= gr_sparse_vec_get_entry(temp2, vec, i, ctx);
+ temp3 = gr_sparse_vec_find_entry(vec, i, ctx);
+ if (
+ gr_equal(temp, temp2, ctx) == T_FALSE ||
+ (temp3 == NULL && gr_is_zero(temp, ctx) == T_FALSE) ||
+ (temp3 != NULL && gr_is_zero(temp3, ctx) == T_TRUE) ||
+ (temp3 != NULL && gr_equal(temp, temp3, ctx) == T_FALSE)
+ )
+ {
+ flint_printf("Failed on %d!\n", i);
+ gr_ctx_println(ctx);
+ gr_println(temp, ctx);
+ gr_println(temp2, ctx);
+ if (temp3 != NULL)
+ gr_println(temp3, ctx);
+ return GR_TEST_FAIL;
+ }
+ }
+    GR_TMP_CLEAR2(temp, temp2, ctx);
+ gr_sparse_vec_clear(vec, ctx);
+ return status;
+}
+
+int
+test_init_from_entries(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i;
+ int status = GR_SUCCESS;
+ slong sz = ctx->sizeof_elem;
+ slong N = 5;
+ ulong inds[5] = {8, 4, 3, 8, 1};
+ gr_ptr entries;
+
+ GR_TMP_INIT_VEC(entries, N, ctx);
+
+ status |= _gr_vec_randtest(entries, state, N, ctx);
+ status |= test_init_from_entries_internal(inds, entries, 2*N, N, ctx);
+
+ /* Next test against some adversarial entries */
+ slong entries_si[5] = {5, 0, 2, -5, 1};
+ for (i = 0; i < N; i++)
+ status |= gr_set_si(GR_ENTRY(entries, i, sz), entries_si[i], ctx);
+ status |= test_init_from_entries_internal(inds, entries, 2*N, N, ctx);
+
+ GR_TMP_CLEAR_VEC(entries, N, ctx);
+ return status;
+}
+
+TEST_FUNCTION_START(gr_sparse_vec_init, state)
+{
+ int i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 16; ++i)
+ {
+ while(1)
+ {
+ gr_ctx_init_random(ctx, state);
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ //gr_ctx_print(ctx); flint_printf("\n");
+ CHECK_TEST(test_init(ctx), "Init");
+ CHECK_TEST(test_init_from_entries_canonical(state, ctx), "Init from entries in canonical form");
+ CHECK_TEST(test_init_from_entries(state, ctx), "Init from entries in noncanonical form");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
diff --git a/src/gr_sparse_vec/test/t-randtest.c b/src/gr_sparse_vec/test/t-randtest.c
new file mode 100644
index 0000000000..57feb5380b
--- /dev/null
+++ b/src/gr_sparse_vec/test/t-randtest.c
@@ -0,0 +1,78 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "test_helpers.h"
+#include "gr_sparse_vec.h"
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int test_randtest(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i;
+ slong N = 1024;
+ slong n_tests = 10;
+ int status = GR_SUCCESS;
+ gr_sparse_vec_t vec;
+ gr_sparse_vec_init(vec, N, ctx);
+
+ //flint_printf("Testing w/o replacement\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_sparse_vec_randtest(vec, 128, 0, state, ctx);
+ if (!gr_sparse_vec_is_valid(vec, ctx) || vec->nnz != 128)
+ return GR_TEST_FAIL;
+ }
+
+ //flint_printf("Testing w/ replacement\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_sparse_vec_randtest(vec, 32, 1, state, ctx);
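+        // 32 draws with replacement yield at most 32 distinct indices; 24 is a loose lower bound allowing for collisions.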
+ if (!gr_sparse_vec_is_valid(vec, ctx) || vec->nnz > 32 || vec->nnz < 24)
+ return GR_TEST_FAIL;
+ }
+
+ //flint_printf("Testing w/ prob\n");
+ for (i = 0; i < n_tests; i++)
+ {
+ status |= gr_sparse_vec_randtest_prob(vec, 0.125, state, ctx);
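+        // Each of the 1024 entries is kept with probability 1/8, so nnz concentrates near 128; [64, 192] is a generous window.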
+ if (!gr_sparse_vec_is_valid(vec, ctx) || vec->nnz > 192 || vec->nnz < 64)
+ {
+            gr_sparse_vec_print_nz(vec, ctx); flint_printf("\nnnz = %wd\n", vec->nnz);
+ return GR_TEST_FAIL;
+ }
+
+ }
+ gr_sparse_vec_clear(vec, ctx);
+ return status;
+}
+
+TEST_FUNCTION_START(gr_sparse_vec_randtest, state)
+{
+ int i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 16; ++i)
+ {
+ while(1)
+ {
+ //gr_ctx_init_nmod(ctx, 2147483647);
+ gr_ctx_init_random(ctx, state);
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ //gr_ctx_print(ctx); flint_printf("\n");
+
+ CHECK_TEST(test_randtest(state, ctx), "Test random sparse vector generation");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
diff --git a/src/gr_sparse_vec/test/t-sum-prod.c b/src/gr_sparse_vec/test/t-sum-prod.c
new file mode 100644
index 0000000000..703cb118c7
--- /dev/null
+++ b/src/gr_sparse_vec/test/t-sum-prod.c
@@ -0,0 +1,97 @@
+/*
+ Copyright (C) 2024 Kartik Venkatram and Alden Walker
+
+ This file is part of FLINT.
+
+ FLINT is free software: you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License (LGPL) as published
+ by the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version. See <https://www.gnu.org/licenses/>.
+*/
+
+#include "test_helpers.h"
+#include "gr_sparse_vec.h"
+
+#define CHECK_TEST(x, name) { if (GR_SUCCESS != (x)) { flint_printf("FAIL %s\n", (name)); flint_abort(); } }
+
+int test_sum_prod(flint_rand_t state, gr_ctx_t ctx)
+{
+ slong i;
+ slong N = 30;
+ slong n_tests = 20;
+ int status = GR_SUCCESS;
+ gr_sparse_vec_t vec;
+ gr_ptr dvec;
+ gr_ptr res, res2;
+ GR_TMP_INIT2(res, res2, ctx);
+
+ GR_TMP_INIT_VEC(dvec, N, ctx);
+ gr_sparse_vec_init(vec, N, ctx);
+
+ //flint_printf("Testing sum\n");
+ for (i = 0; i < 2*n_tests; i++)
+ {
+ // Get random sparse vector
+ status |= gr_sparse_vec_randtest(vec, 10, 0, state, ctx);
+ status |= gr_vec_set_sparse_vec(dvec, vec, ctx);
+
+ // Compute sparse sum and check against dense one
+ status |= gr_sparse_vec_sum(res, vec, ctx);
+ status |= _gr_vec_sum(res2, dvec, N, ctx);
+ if (gr_equal(res, res2, ctx) == T_FALSE)
+ {
+ gr_ctx_println(ctx);
+ flint_printf("vec = "); gr_sparse_vec_print_nz(vec, ctx); flint_printf("\n");
+ flint_printf("res = "); gr_println(res, ctx);
+ flint_printf("res2 = "); gr_println(res2, ctx);
+ return GR_TEST_FAIL;
+ }
+ }
+
+ //flint_printf("Testing prod\n");
+ for (i = 0; i < 2*n_tests; i++)
+ {
+ // Get random sparse vector
+ status |= gr_sparse_vec_randtest(vec, 10, 0, state, ctx);
+ status |= gr_vec_set_sparse_vec(dvec, vec, ctx);
+
+        // Compute sparse nonzero product and check against a direct product of the stored values
+ status |= gr_sparse_vec_nz_product(res, vec, ctx);
+ status |= _gr_vec_product(res2, vec->nzs, vec->nnz, ctx);
+ if (gr_equal(res, res2, ctx) == T_FALSE)
+ {
+ gr_ctx_println(ctx);
+ flint_printf("vec = "); gr_sparse_vec_print_nz(vec, ctx); flint_printf("\n");
+ flint_printf("res = "); gr_println(res, ctx);
+ flint_printf("res2 = "); gr_println(res2, ctx);
+ return GR_TEST_FAIL;
+ }
+ }
+
+ gr_sparse_vec_clear(vec, ctx);
+ GR_TMP_CLEAR_VEC(dvec, N, ctx);
+ GR_TMP_CLEAR2(res, res2, ctx);
+ return status;
+}
+
+TEST_FUNCTION_START(gr_sparse_vec_sum_prod, state)
+{
+ slong i;
+ gr_ctx_t ctx;
+ for (i = 0; i < 16; ++i)
+ {
+ while (1)
+ {
+ gr_ctx_init_fmpz(ctx);
+ //gr_ctx_init_random(ctx, state);
+ if (gr_ctx_is_zero_ring(ctx) == T_TRUE)
+ gr_ctx_clear(ctx);
+ else
+ break;
+ }
+ //gr_ctx_println(ctx);
+ CHECK_TEST(test_sum_prod(state, ctx), "Sum and (nonzero) product");
+ gr_ctx_clear(ctx);
+ }
+ TEST_FUNCTION_END(state);
+}
diff --git a/src/gr_sparse_vec/to_dense.c b/src/gr_sparse_vec/to_dense.c
new file mode 100644
index 0000000000..6a6367b870
--- /dev/null
+++ b/src/gr_sparse_vec/to_dense.c
@@ -0,0 +1,26 @@
+#include "gr_sparse_vec.h"
+
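+/*
+    Write the dense image of src into vec, which is assumed to have room
+    for src->length entries. A minimal usage sketch:
+
+        GR_TMP_INIT_VEC(dvec, n, ctx);
+        status |= gr_vec_set_sparse_vec(dvec, src, ctx);
+        ...
+        GR_TMP_CLEAR_VEC(dvec, n, ctx);
+*/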
+int
+gr_vec_set_sparse_vec(gr_ptr vec, gr_sparse_vec_t src, gr_ctx_t ctx)
+{
+    slong i, j, sz, nnz, len;
+    int status = GR_SUCCESS;
+ sz = ctx->sizeof_elem;
+ len = src->length;
+ nnz = src->nnz;
+ j = 0;
+ for (i = 0; i < len; i++)
+ {
+ /* if i is the column of the next nonzero in src, copy it in */
+ if (j < nnz && i == src->inds[j])
+ {
+ status |= gr_set(GR_ENTRY(vec, i, sz), GR_ENTRY(src->nzs, j, sz), ctx);
+ j++;
+ }
+ else
+ {
+ status |= gr_zero(GR_ENTRY(vec, i, sz), ctx);
+ }
+ }
+ return status;
+}
diff --git a/src/gr_sparse_vec/write.c b/src/gr_sparse_vec/write.c
new file mode 100644
index 0000000000..6c1ae1a5dc
--- /dev/null
+++ b/src/gr_sparse_vec/write.c
@@ -0,0 +1,30 @@
+#include <stdio.h>
+#include "gr_sparse_vec.h"
+
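+/* Write the nonzeros of vec to out in the form "[ind: val, ...]". */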
+int
+gr_sparse_vec_write_nz(gr_stream_t out, const gr_sparse_vec_t vec, gr_ctx_t ctx)
+{
+ int status = GR_SUCCESS;
+ slong i, sz = ctx->sizeof_elem;
+ slong nnz = vec->nnz;
+ gr_stream_write(out, "[");
+ for (i = 0; i < nnz; i++)
+ {
+ //gr_stream_write(out, "\n\t");
+ gr_stream_write_si(out, vec->inds[i]);
+ gr_stream_write(out, ": ");
+ status |= gr_write(out, GR_ENTRY(vec->nzs, i, sz), ctx);
+ if (i < nnz - 1)
+ gr_stream_write(out, ", ");
+ }
+ gr_stream_write(out, "]");
+ //gr_stream_write(out, "\n]");
+ return status;
+}
+
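+/* Convenience wrapper: print the nonzeros of vec to stdout. */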
+int gr_sparse_vec_print_nz(const gr_sparse_vec_t vec, gr_ctx_t ctx)
+{
+ gr_stream_t out;
+ gr_stream_init_file(out, stdout);
+ return gr_sparse_vec_write_nz(out, vec, ctx);
+}
diff --git a/src/gr_vec.h b/src/gr_vec.h
index d09665a29d..ce129133d9 100644
--- a/src/gr_vec.h
+++ b/src/gr_vec.h
@@ -58,6 +58,14 @@ int _gr_vec_print(gr_srcptr vec, slong len, gr_ctx_t ctx);
int gr_vec_print(const gr_vec_t vec, gr_ctx_t ctx);
GR_VEC_INLINE WARN_UNUSED_RESULT int _gr_vec_zero(gr_ptr vec, slong len, gr_ctx_t ctx) { return GR_VEC_CONSTANT_OP(ctx, VEC_ZERO)(vec, len, ctx); }
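+/* Set vec to the length-len standard basis vector with a one in position pos
+   and zeros elsewhere; returns GR_DOMAIN if pos is out of range. */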
+GR_VEC_INLINE WARN_UNUSED_RESULT int _gr_vec_one(gr_ptr vec, slong pos, slong len, gr_ctx_t ctx)
+{
+ int status;
+ if (pos < 0 || len < 0 || pos >= len)
+ return GR_DOMAIN;
+ status = GR_VEC_CONSTANT_OP(ctx, VEC_ZERO)(vec, len, ctx);
+ return status | gr_one(GR_ENTRY(vec, pos, ctx->sizeof_elem), ctx);
+}
GR_VEC_INLINE WARN_UNUSED_RESULT int _gr_vec_set(gr_ptr res, gr_srcptr src, slong len, gr_ctx_t ctx) { return GR_VEC_OP(ctx, VEC_SET)(res, src, len, ctx); }
GR_VEC_INLINE WARN_UNUSED_RESULT int _gr_vec_neg(gr_ptr res, gr_srcptr src, slong len, gr_ctx_t ctx) { return GR_VEC_OP(ctx, VEC_NEG)(res, src, len, ctx); }