#ifndef BMSPARSEVEC_H__INCLUDED__
#define BMSPARSEVEC_H__INCLUDED__

// ...

#ifndef BM__H__INCLUDED__
# error missing include (bm.h or bm64.h)
#endif

// ...

/** sparse vector with runtime compression using bit transposition method */
template<class Val, class BV>
class sparse_vector : public base_sparse_vector<Val, BV, 1>
{
public:
    typedef Val                                           value_type;
    typedef BV                                            bvector_type;
    typedef bvector_type*                                 bvector_type_ptr;
    typedef typename bvector_type::size_type              size_type;
    typedef typename bvector_type::allocator_type         allocator_type;
    typedef typename allocator_type::allocator_pool_type  allocator_pool_type;
    typedef typename bvector_type::allocation_policy      allocation_policy_type;
    typedef bm::basic_bmatrix<BV>                         bmatrix_type;
    typedef base_sparse_vector<Val, BV, 1>                parent_type;
    // ...

    /** Reference class to access elements via common [] operator */
    class reference
    {
    public:
        // ...
        reference& operator=(const reference& ref)
        {
            sv_.set(idx_, (value_type)ref);
            return *this;
        }
        bool operator==(const reference& ref) const { return bool(*this) == bool(ref); }
        bool is_null() const                        { return sv_.is_null(idx_); }
    private:
        sparse_vector<Val, BV>& sv_;
        size_type               idx_;
    };

    /** Const iterator to traverse the sparse vector */
    class const_iterator
    {
    public:
        // ...
        bool operator==(const const_iterator& it) const
                          { return (pos_ == it.pos_) && (sv_ == it.sv_); }
        bool operator!=(const const_iterator& it) const { return ! operator==(it); }
        bool operator<(const const_iterator& it) const  { return pos_ < it.pos_; }
        bool operator<=(const const_iterator& it) const { return pos_ <= it.pos_; }
        bool operator>(const const_iterator& it) const  { return pos_ > it.pos_; }
        bool operator>=(const const_iterator& it) const { return pos_ >= it.pos_; }

        value_type value() const;               ///< Get current value
        size_type pos() const { return pos_; }  ///< Current position (index) in the vector
        void go_to(size_type pos);              ///< re-position to a specified index
        // ...
    private:
        void skip_zero_values();
    private:
        enum buf_size_e { n_buf_size = 1024 * 8 };
        typedef sparse_vector<Val, BV>           sparse_vector_type;
        typedef bm::byte_buffer<allocator_type>  buffer_type;

        const sparse_vector_type*    sv_;       ///< parent vector
        size_type                    pos_;      ///< current position
        mutable buffer_type          buffer_;   ///< decoded value buffer
        mutable value_type*          buf_ptr_;  ///< position inside the buffer
        mutable allocator_pool_type  pool_;
    };
    /** Back insert iterator implements buffered insert, faster than generic access assignment */
    class back_insert_iterator
    {
    public:
        // ...
        back_insert_iterator& operator=(const back_insert_iterator& bi)
        {
            this->flush(); sv_ = bi.sv_;
            // ...
            return *this;
        }
        void add(value_type v);            ///< add value to the container
        void add_null(size_type count);    ///< add NULL (no-value) to the container
        // ...
    protected:
        size_type add_value(value_type v); ///< add value to the buffer without changing the NULL vector
    private:
        enum buf_size_e { n_buf_size = 1024 * 8 };
    private:
        // ...
        bvector_type*  bv_null_;   ///< not-NULL (assigned) bit-vector pointer
        // ...
        value_type*    buf_ptr_;   ///< position inside the accumulation buffer
    };
public:
    // ------------------------------------------------------------
    // element access
    //
    /** Operator to get write access to an element */
    reference operator[](size_type idx)        { return reference(*this, idx); }
    /** get specified element without bounds checking */
    value_type operator[](size_type idx) const { return this->get(idx); }
    /** access specified element with bounds checking */
    value_type at(size_type idx) const;
    /** get specified element without bounds checking */
    value_type get(size_type idx) const;
    /** set specified element with bounds checking and automatic resize */
    void set(size_type idx, value_type v);
    /** increment specified element by one */
    void inc(size_type idx);
    /** insert specified element into container */
    void insert(size_type idx, value_type v);
    /** erase specified element from container */
    void erase(size_type idx);

    // ------------------------------------------------------------
    // import / export
    //
    /** Import list of elements from a C-style array */
    void import(const value_type* arr, size_type arr_size, size_type offset = 0);
    /** Import list of elements from a C-style array (pushed back) */
    void import_back(const value_type* arr, size_type arr_size);
    /** Bulk export list of elements to a C-style array */
    size_type decode(value_type* arr,
                     size_type   idx_from,
                     size_type   dec_size,
                     bool        zero_mem = true) const;
    /** Gather elements to a C-style array */
    size_type gather(value_type*      arr,
                     const size_type* idx,
                     size_type        size,
                     bm::sort_order   sorted_idx) const;
    /** clear range (assign bit 0 for all plains) */
    sparse_vector<Val, BV>& clear_range(size_type left, size_type right,
                                        bool set_null = false);
    /** Compare vector element with argument */
    int compare(size_type idx, const value_type val) const;
    /** copy range of values from another sparse vector */
    void copy_range(const sparse_vector<Val, BV>& sv,
                    size_type left, size_type right);
    /** Apply value filter, defined by mask vector */
    void filter(const bvector_type& bv_mask);
    /** Bulk export list of elements to a C-style array */
    size_type extract(value_type*          arr,
                      size_type            size,
                      size_type            offset = 0,
                      bool                 zero_mem = true,
                      allocator_pool_type* pool_ptr = 0) const;
    /** extract small window without use of masking vector */
    size_type extract_range(value_type* arr, size_type size, size_type offset,
                            bool zero_mem = true) const;
    /** extract medium window without use of masking vector */
    size_type extract_plains(value_type* arr, size_type size, size_type offset,
                             bool zero_mem = true) const;
    /** find position of compressed element by its rank */
    static bool find_rank(size_type rank, size_type& pos);

protected:
    /** set value without checking boundaries */
    void set_value(size_type idx, value_type v);
    // ... (remaining interface, typedefs and internal members elided)
};
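/*
    Usage sketch (illustrative; not part of the original header): basic element
    access via set()/get()/at() and operator[].  The typedef and the sample
    values below are assumptions made for this example only.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;

        svector_u32 sv;           // plain vector, no NULL support
        sv.set(2, 25);            // sv[2] = 25 (vector auto-resizes)
        sv.push_back(100);        // append at the end (index 3)
        sv[0] = 7;                // write through the reference proxy

        unsigned v1 = sv.get(2);  // unchecked read -> 25
        unsigned v2 = sv[3];      // unchecked read -> 100
        unsigned v3 = sv.at(3);   // checked read, throws std::range_error when out of range
*/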

//---------------------------------------------------------------------

template<class Val, class BV>
sparse_vector<Val, BV>::sparse_vector(
        bm::null_support       null_able,
        allocation_policy_type ap,
        size_type              bv_max_size,
        const allocator_type&  alloc)
: parent_type(null_able, ap, bv_max_size, alloc)
{}

template<class Val, class BV>
sparse_vector<Val, BV>::sparse_vector(const sparse_vector<Val, BV>& sv)
: parent_type(sv)
{}

template<class Val, class BV>
sparse_vector<Val, BV>::sparse_vector(sparse_vector<Val, BV>&& sv) BMNOEXEPT
{
    parent_type::swap(sv);
}

template<class Val, class BV>
sparse_vector<Val, BV>::~sparse_vector() BMNOEXEPT
{}

template<class Val, class BV>
void sparse_vector<Val, BV>::swap(sparse_vector<Val, BV>& sv) BMNOEXEPT
{
    parent_type::swap(sv);
}

template<class Val, class BV>
void sparse_vector<Val, BV>::throw_range_error(const char* err_msg)
{
    throw std::range_error(err_msg);
}

template<class Val, class BV>
void sparse_vector<Val, BV>::throw_bad_alloc()
{
    BV::throw_bad_alloc();
}
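/*
    Usage sketch (illustrative; not part of the original header): constructing
    a NULL-able vector.  bm::use_null / bm::no_null select null_support; the
    typedef and values below are assumptions for the example.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;

        svector_u32 sv1;                  // default: values only (bm::no_null)
        svector_u32 sv2(bm::use_null);    // keeps a separate "assigned" bit-vector

        sv2.set(10, 1);
        bool n5  = sv2.is_null(5);        // true  - never assigned
        bool n10 = sv2.is_null(10);       // false - assigned above
        sv2.set_null(10);                 // back to unassigned
*/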

//---------------------------------------------------------------------
// ... (one member function elided)
//---------------------------------------------------------------------

template<class Val, class BV>
void sparse_vector<Val, BV>::import(const value_type* arr,
                                    size_type         arr_size,
                                    size_type         offset)
{
    unsigned char b_list[sizeof(Val)*8];
    unsigned row_len[sizeof(Val)*8] = {0, };

    const unsigned transpose_window = 256;
    bm::tmatrix<size_type, sizeof(Val)*8, transpose_window> tm; // transposition accumulator

    if (arr_size == 0)
        throw_range_error("sparse_vector range error (import size 0)");

    if (offset < this->size_) // in case it touches existing elements
    {
        // clear all plains in the range to guarantee correct import of 0 values
        this->clear_range(offset, offset + arr_size - 1);
    }

    size_type i;
    for (i = 0; i < arr_size; ++i)
    {
        unsigned bcnt = bm::bitscan(arr[i], b_list); // list of set bits in the value
        const size_type bit_idx = i + offset;

        for (unsigned j = 0; j < bcnt; ++j)
        {
            unsigned p = b_list[j];   // target plain
            unsigned rl = row_len[p];
            tm.row(p)[rl] = bit_idx;  // accumulate the bit index for plain p
            row_len[p] = ++rl;

            if (rl == transpose_window)
            {
                // accumulated row is full: bulk-set it into the plain and reset
                bvector_type* bv = this->get_plain(p);
                const size_type* r = tm.row(p);
                // ... (bulk set of the accumulated indexes into *bv)
                row_len[p] = 0;
            }
        } // for j
    } // for i

    // process the remaining (partially filled) transposition matrix rows
    for (unsigned k = 0; k < tm.rows(); ++k)
    {
        unsigned rl = row_len[k];
        if (rl)
        {
            bvector_type* bv = this->get_plain(k);
            const size_type* r = tm.row(k);
            // ... (bulk set of the accumulated indexes into *bv)
        }
    } // for k

    if (i + offset > this->size_)
        this->size_ = i + offset;

    bvector_type* bv_null = this->get_null_bvect();
    if (bv_null) // vector keeps the NULL (assigned) bit-vector
        bv_null->set_range(offset, offset + arr_size - 1);
}

//---------------------------------------------------------------------

template<class Val, class BV>
void sparse_vector<Val, BV>::import_back(const value_type* arr,
                                         size_type         arr_size)
{
    this->import(arr, arr_size, this->size());
}

//---------------------------------------------------------------------

template<class Val, class BV>
typename sparse_vector<Val, BV>::size_type
sparse_vector<Val, BV>::decode(value_type* arr,
                               size_type   idx_from,
                               size_type   dec_size,
                               bool        zero_mem) const
{
    // dispatch by export window size (exact thresholds elided in this excerpt):
    //   small windows  ->  return extract_range(arr, dec_size, idx_from, zero_mem);
    //   medium windows ->  return extract_plains(arr, dec_size, idx_from, zero_mem);
    //   large windows  ->  extract() (mask-vector based)
    // ...
}
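/*
    Usage sketch (illustrative; not part of the original header): bulk import
    and decode.  import() transposes the whole array at once, which is much
    faster than element-by-element set().  The typedef and sample array are
    assumptions for the example.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;

        unsigned arr[3] = { 1, 2, 3 };
        svector_u32 sv;
        sv.import(arr, 3);            // sv = { 1, 2, 3 }
        sv.import_back(arr, 3);       // sv = { 1, 2, 3, 1, 2, 3 }

        unsigned out[6];
        sv.decode(&out[0], 0, 6);     // export 6 elements starting at index 0
*/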

//---------------------------------------------------------------------

template<class Val, class BV>
typename sparse_vector<Val, BV>::size_type
sparse_vector<Val, BV>::gather(value_type*       arr,
                               const size_type*  idx,
                               size_type         size,
                               bm::sort_order    sorted_idx) const
{
    if (size == 1) // corner case: single value
    {
        arr[0] = this->get(idx[0]);
        return size;
    }
    ::memset(arr, 0, sizeof(value_type)*size);

    for (size_type i = 0; i < size;)
    {
        // 1) look ahead for a run [i..r) of indexes that fall into the same
        //    block; sorted_idx (BM_SORTED / BM_UNKNOWN / BM_UNSORTED) controls
        //    how the run is detected:
        //
        //        bool sorted_block = true;
        //        size_type idx_prev = idx[r];
        //        sorted_block = !(idx[r] < idx_prev);   // sorted check
        //        ...
        //        sorted_block = false;                  // BM_UNSORTED
        //        for (; r < size; ++r) { ... }
        //
        size_type r = i + 1;
        // ... (block boundary look-ahead elided)

        if (r == i + 1) // single element in this block: plain random access
        {
            arr[i] = this->get(idx[i]);
            i = r;
            continue;
        }

        // 2) decode the whole run plain by plain (better cache locality
        //    against the bit-plain matrix):
        //
        //        unsigned eff_plains = this->effective_plains();
        //        for (unsigned j = 0; j < eff_plains; ++j)
        //        {
        //            const bm::word_t* blk = this->bmatr_.get_block(j, i0, j0);
        //            const value_type mask1 = 1;
        //            // GAP block, sorted run: binary search + speculative scan
        //            for (size_type k = i; k < r; )
        //            {
        //                unsigned gap_value = gap_blk[gidx];
        //                arr[k] |= vm = (mask1 << j);
        //                for (++k; k < r; ++k)
        //                    if (unsigned(idx[k] & bm::set_block_mask) <= gap_value)
        //                        arr[k] |= vm;
        //                // the not-set branch skips forward on the same
        //                // (unsigned(idx[k] & bm::set_block_mask) <= gap_value) test
        //            }
        //            // GAP block, unsorted run: per-index lookup
        //            for (size_type k = i; k < r; ++k)
        //                arr[k] |= (value_type(bool(is_set)) << j);
        //        }
        //
        // ... (full and bit blocks handled separately, elided)

        i = r;
    } // for i
    return size;
}
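/*
    Usage sketch (illustrative; not part of the original header): random gather
    by an index list.  bm::BM_SORTED / bm::BM_UNSORTED / bm::BM_UNKNOWN tell
    gather() whether the index array is ascending, which enables the block-local
    speculative decode above.  Names and values are example assumptions.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
        svector_u32 sv;
        for (unsigned i = 0; i < 100; ++i)
            sv.push_back(i * 2);

        svector_u32::size_type idx[4] = { 2, 5, 35, 90 };   // ascending
        unsigned out[4];
        sv.gather(&out[0], &idx[0], 4, bm::BM_SORTED);      // out = { 4, 10, 70, 180 }
*/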

//---------------------------------------------------------------------

template<class Val, class BV>
typename sparse_vector<Val, BV>::size_type
sparse_vector<Val, BV>::extract_range(value_type* arr,
                                      size_type   size,
                                      size_type   offset,
                                      bool        zero_mem) const
{
    if (zero_mem)
        ::memset(arr, 0, sizeof(value_type)*size);

    size_type start = offset;
    size_type end = start + size;
    if (end > this->size_)
        end = this->size_;

    // decode the window plain by plain, testing one bit per element:
    //
    //     for (unsigned j = 0; j < sizeof(Val)*8; ++j)
    //     {
    //         blk = this->bmatr_.get_block(j, i0, j0);
    //         for (size_type k = start; k < end; ++k)
    //         {
    //             // re-fetch the block when k crosses a block boundary
    //             j0  = unsigned(nb & bm::set_array_mask);
    //             blk = this->bmatr_.get_block(j, i0, j0);
    //             ...
    //             nbit = unsigned(k & bm::set_block_mask);
    //             is_set = (blk[nword] & mask0);     // bit-block test
    //             size_type idx = k - offset;
    //             value_type vm = (bool) is_set;
    //             arr[idx] |= (vm << j);
    //         }
    //     }
    //
    // ... (GAP and full blocks are tested without the word mask)
    return size;
}

//---------------------------------------------------------------------

template<class Val, class BV>
typename sparse_vector<Val, BV>::size_type
sparse_vector<Val, BV>::extract_plains(value_type* arr,
                                       size_type   size,
                                       size_type   offset,
                                       bool        zero_mem) const
{
    if (zero_mem)
        ::memset(arr, 0, sizeof(value_type)*size);

    size_type start = offset;
    size_type end = start + size;
    if (end > this->size_)
        end = this->size_;

    // per-plain extraction: enumerate the set bits of every plain that
    // intersects the window and OR the plain mask into the target array
    for (size_type i = 0; i < parent_type::value_bits(); ++i)
    {
        const bvector_type* bv = this->bmatr_.get_row(i);
        if (!bv)
            continue;

        value_type mask = 1;
        mask <<= i;

        typename BV::enumerator en(bv, offset);
        for (; en.valid(); ++en)
        {
            size_type idx = *en - offset;
            if (idx >= size)
                break;
            arr[idx] |= mask;
        } // for en
    } // for i
    return size;
}

//---------------------------------------------------------------------

template<class Val, class BV>
typename sparse_vector<Val, BV>::size_type
sparse_vector<Val, BV>::extract(value_type*          arr,
                                size_type            size,
                                size_type            offset,
                                bool                 zero_mem,
                                allocator_pool_type* pool_ptr) const
{
    /// bit-plain visitor: ORs the plain mask into the target array
    struct sv_decode_visitor_func
    {
        sv_decode_visitor_func(value_type* varr, value_type mask, size_type off)
            : arr_(varr), mask_(mask), off_(off)
        {}
        void add_bits(size_type arr_offset,
                      const unsigned char* bits, unsigned bits_size)
        {
            size_type idx_base = arr_offset - off_;
            const value_type m = mask_;
            unsigned i = 0;
            for (; i < bits_size; ++i)
                arr_[idx_base + bits[i]] |= m;
        }
        void add_range(size_type arr_offset, unsigned sz)
        {
            size_type idx_base = arr_offset - off_;
            const value_type m = mask_;
            for (unsigned i = 0; i < sz; ++i)
                arr_[i + idx_base] |= m;
        }
        value_type* arr_;
        value_type  mask_;
        size_type   off_;
    };

    if (zero_mem)
        ::memset(arr, 0, sizeof(value_type)*size);

    size_type start = offset;
    size_type end = start + size;
    if (end > this->size_)
        end = this->size_;

    bool masked_scan = !(offset == 0 && size == this->size());
    if (masked_scan) // use a temporary mask vector to restrict the scan window
    {
        bvector_type bv_mask;
        // ... (bv_mask covers [offset..end), allocator pool_ptr is reused here)
        for (size_type i = 0; i < parent_type::value_bits(); ++i)
        {
            const bvector_type* bv = this->bmatr_.get_row(i);
            if (!bv)
                continue;
            // AND the plain with the window mask, then visit every set bit
            sv_decode_visitor_func func(arr, (value_type(1) << i), offset);
            // ... (bm::for_each_bit over the masked plain)
        } // for i
    }
    else // full-content extraction, no masking needed
    {
        for (size_type i = 0; i < parent_type::value_bits(); ++i)
        {
            const bvector_type* bv = this->bmatr_.get_row(i);
            if (!bv)
                continue;
            sv_decode_visitor_func func(arr, (value_type(1) << i), 0);
            bm::for_each_bit(*bv, func);
        } // for i
    }
    return size;
}
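/*
    Usage sketch (illustrative; not part of the original header): windowed
    extract.  decode() dispatches to extract_range()/extract_plains()/extract()
    by window size; extract() can also be called directly.  Names and sizes
    below are example assumptions.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
        svector_u32 sv;
        for (unsigned i = 0; i < 10000; ++i)
            sv.push_back(i);

        unsigned out[4096];
        // export 4096 elements starting at index 1000 (note: size, then offset)
        sv.extract(&out[0], 4096, 1000);
*/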

//---------------------------------------------------------------------

template<class Val, class BV>
typename sparse_vector<Val, BV>::value_type
sparse_vector<Val, BV>::at(size_type idx) const
{
    if (idx >= this->size_)
        throw_range_error("sparse vector range error");
    return this->get(idx);
}

//---------------------------------------------------------------------

template<class Val, class BV>
typename sparse_vector<Val, BV>::value_type
sparse_vector<Val, BV>::get(size_type i) const
{
    value_type v = 0;
    unsigned eff_plains = this->effective_plains();
    for (unsigned j = 0; j < eff_plains; j += 4)
    {
        bool b = this->bmatr_.test_4rows(j); // any bits in the next 4 plains?
        if (b)
        {
            value_type vm = this->bmatr_.get_half_octet(i, j);
            v |= vm << j;
        }
    } // for j
    return v;
}

//---------------------------------------------------------------------

template<class Val, class BV>
void sparse_vector<Val, BV>::set(size_type idx, value_type v)
{
    if (idx >= this->size_)
        this->size_ = idx+1;
    set_value(idx, v);
}

template<class Val, class BV>
void sparse_vector<Val, BV>::set_null(size_type idx)
{
    if (idx >= this->size_)
        this->size_ = idx+1;
    set_value(idx, (value_type)0);
    bvector_type* bv_null = this->get_null_bvect();
    if (bv_null)
        bv_null->set(idx, false);
}

template<class Val, class BV>
void sparse_vector<Val, BV>::push_back(value_type v)
{
    set_value(this->size_, v);
    ++(this->size_);
}

template<class Val, class BV>
void sparse_vector<Val, BV>::push_back_null(size_type count)
{
    this->size_ += count;
}

template<class Val, class BV>
void sparse_vector<Val, BV>::insert(size_type idx, value_type v)
{
    if (idx >= this->size_)
    {
        this->size_ = idx+1;
        set_value(idx, v);
        return;
    }
    insert_value(idx, v);
}

template<class Val, class BV>
void sparse_vector<Val, BV>::insert_value(size_type idx, value_type v)
{
    insert_value_no_null(idx, v);
    this->insert_null(idx, true);
}

//---------------------------------------------------------------------

template<class Val, class BV>
void sparse_vector<Val, BV>::insert_value_no_null(size_type idx, value_type v)
{
    unsigned bsr = v ? bm::bit_scan_reverse(v) : 0;
    value_type mask = 1u;
    unsigned i = 0;
    for (; i <= bsr; ++i)
    {
        if (v & mask)
        {
            bvector_type* bv = this->get_plain(i);
            bv->insert(idx, true);
        }
        else
        {
            bvector_type_ptr bv = this->bmatr_.get_row(i);
            if (bv)
                bv->insert(idx, false);
        }
        mask = value_type(mask << 1);
    } // for i

    // insert 0 into all other existing plains
    unsigned eff_plains = this->effective_plains();
    for (; i < eff_plains; ++i)
    {
        bvector_type* bv = this->bmatr_.get_row(i);
        if (bv)
            bv->insert(idx, false);
    } // for i
    this->size_++;
}

//---------------------------------------------------------------------

template<class Val, class BV>
void sparse_vector<Val, BV>::erase(size_type idx)
{
    if (idx >= this->size_)
        return;
    this->erase_column(idx);
    this->size_--;
}

template<class Val, class BV>
void sparse_vector<Val, BV>::push_back_no_null(value_type v)
{
    set_value_no_null(this->size_, v);
    ++(this->size_);
}

template<class Val, class BV>
void sparse_vector<Val, BV>::set_value(size_type idx, value_type v)
{
    set_value_no_null(idx, v);
    bvector_type* bv_null = this->get_null_bvect();
    if (bv_null)
        bv_null->set_bit_no_check(idx);
}
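/*
    Usage sketch (illustrative; not part of the original header): positional
    insert and erase.  insert() shifts the tail of every bit-plain right by
    one, erase() shifts it left.  Values below are example assumptions.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
        svector_u32 sv;
        sv.push_back(10);
        sv.push_back(30);           // sv = { 10, 30 }
        sv.insert(1, 20);           // sv = { 10, 20, 30 }
        sv.erase(0);                // sv = { 20, 30 }
*/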

//---------------------------------------------------------------------

template<class Val, class BV>
void sparse_vector<Val, BV>::set_value_no_null(size_type idx, value_type v)
{
    unsigned bsr = v ? bm::bit_scan_reverse(v) : 0;
    unsigned i0, j0; // block coordinates of idx
    // ...

    // clear the plains above the highest set bit of v
    unsigned eff_plains = this->effective_plains();
    for (unsigned i = bsr; i < eff_plains; ++i)
    {
        const bm::word_t* blk = this->bmatr_.get_block(i, i0, j0);
        if (blk)
        {
            bvector_type* bv = this->bmatr_.get_row(i);
            if (bv)
                bv->clear_bit_no_check(idx);
        }
    } // for i

    // set/clear the value bits plain by plain
    value_type mask = 1u;
    for (unsigned j = 0; j <= bsr; ++j)
    {
        if (v & mask)
        {
            bvector_type* bv = this->get_plain(j);
            bv->set_bit_no_check(idx);
        }
        else
        {
            const bm::word_t* blk = this->bmatr_.get_block(j, i0, j0);
            if (blk)
            {
                bvector_type* bv = this->bmatr_.get_row(j);
                bv->clear_bit_no_check(idx);
            }
        }
        mask = value_type(mask << 1);
    } // for j
}

//---------------------------------------------------------------------

template<class Val, class BV>
void sparse_vector<Val, BV>::inc(size_type idx)
{
    if (idx >= this->size_)
        this->size_ = idx+1;

    for (unsigned i = 0; i < parent_type::sv_value_plains; ++i)
    {
        bvector_type* bv = this->get_plain(i);
        bool carry_over = bv->inc(idx); // ripple the carry across bit-plains
        if (!carry_over)
            break;
    } // for i

    bvector_type* bv_null = this->get_null_bvect();
    if (bv_null)
        bv_null->set_bit_no_check(idx);
}

//---------------------------------------------------------------------

template<class Val, class BV>
void sparse_vector<Val, BV>::clear() BMNOEXEPT
{
    parent_type::clear();
}
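/*
    Usage sketch (illustrative; not part of the original header): inc() adds 1
    to an element by rippling a carry through the bit-plains, so counters can
    be bumped without a get()/set() round trip.  Values are example assumptions.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
        svector_u32 sv;
        sv.set(7, 41);
        sv.inc(7);                  // sv[7] == 42
        sv.inc(100);                // auto-extends, sv[100] == 1
        sv.clear();                 // resize to zero, free memory
*/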

//---------------------------------------------------------------------
// ... (one member function elided)
//---------------------------------------------------------------------

template<class Val, class BV>
sparse_vector<Val, BV>&
sparse_vector<Val, BV>::clear_range(size_type left, size_type right,
                                    bool set_null)
{
    parent_type::clear_range(left, right, set_null);
    return *this;
}

template<class Val, class BV>
void sparse_vector<Val, BV>::calc_stat(
            struct sparse_vector<Val, BV>::statistics* st) const
{
    typename bvector_type::statistics stbv;
    parent_type::calc_stat(&stbv);
    // ... (accumulate stbv into *st, add projected serialization overhead)
}

template<class Val, class BV>
void sparse_vector<Val, BV>::optimize(bm::word_t* temp_block,
                                      typename bvector_type::optmode opt_mode,
                                      typename sparse_vector<Val, BV>::statistics* st)
{
    typename bvector_type::statistics stbv;
    parent_type::optimize(temp_block, opt_mode, st ? &stbv : 0);
    // ... (copy the accumulated statistics into *st when requested)
}

template<class Val, class BV>
void sparse_vector<Val, BV>::optimize_gap_size()
{
    unsigned stored_plains = this->stored_plains();
    for (unsigned j = 0; j < stored_plains; ++j)
    {
        bvector_type* bv = this->bmatr_.get_row(j);
        if (bv)
            bv->optimize_gap_size();
    } // for j
}
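/*
    Usage sketch (illustrative; not part of the original header): memory
    optimization after a bulk load.  optimize() re-compresses the plain
    bit-vectors (GAP/prefix-sum where profitable) and can report statistics.
    The typedef and values below are assumptions for the example.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
        svector_u32 sv;
        for (unsigned i = 0; i < 100000; ++i)
            sv.push_back(i % 16);

        svector_u32::statistics st;
        sv.optimize(0, svector_u32::bvector_type::opt_compress, &st);
        // or simply: sv.optimize();
*/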

//---------------------------------------------------------------------

template<class Val, class BV>
sparse_vector<Val, BV>&
sparse_vector<Val, BV>::join(const sparse_vector<Val, BV>& sv)
{
    size_type arg_size = sv.size();
    if (this->size_ < arg_size)
        resize(arg_size);

    bvector_type* bv_null = this->get_null_bvect();
    unsigned plains;
    if (bv_null)
        plains = this->stored_plains(); // include the NULL plain
    else
        plains = this->plains();

    for (unsigned j = 0; j < plains; ++j)
    {
        const bvector_type* arg_bv = sv.bmatr_.row(j);
        if (arg_bv)
        {
            bvector_type* bv = this->bmatr_.get_row(j);
            if (!bv)
                bv = this->get_plain(j);
            *bv |= *arg_bv; // OR-in the argument plain
        }
    } // for j
    // ... (NULL plain fix-up when the argument is not NULL-able)
    return *this;
}

//---------------------------------------------------------------------

template<class Val, class BV>
sparse_vector<Val, BV>&
sparse_vector<Val, BV>::merge(sparse_vector<Val, BV>& sv)
{
    size_type arg_size = sv.size();
    if (this->size_ < arg_size)
        resize(arg_size);

    bvector_type* bv_null = this->get_null_bvect();
    unsigned plains;
    if (bv_null)
        plains = this->stored_plains();
    else
        plains = this->plains();

    for (unsigned j = 0; j < plains; ++j)
    {
        // borrow (move) the blocks of the argument plain instead of copying
        bvector_type* bv = this->bmatr_.get_row(j);
        if (!bv)
            bv = this->get_plain(j);
        // ... (per-plain merge from the argument vector)
    } // for j
    // ...
    return *this;
}
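/*
    Usage sketch (illustrative; not part of the original header): join() ORs
    another vector in and leaves the argument intact, while merge() borrows
    (moves) blocks from the argument, which is cheaper but modifies it.
    Values are example assumptions.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
        svector_u32 sv1, sv2, sv3;
        sv1.set(0, 1);
        sv2.set(1, 2);
        sv3.set(2, 4);

        sv1.join(sv2);              // sv1 = { 1, 2 }, sv2 unchanged
        sv1.merge(sv3);             // sv1 = { 1, 2, 4 }, sv3 content is consumed
*/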

//---------------------------------------------------------------------

template<class Val, class BV>
void sparse_vector<Val, BV>::copy_range(const sparse_vector<Val, BV>& sv,
                                        size_type left, size_type right)
{
    bvector_type* bv_null = this->get_null_bvect();
    unsigned plains;
    if (bv_null)
    {
        plains = this->stored_plains();
        const bvector_type* bv_null_arg = sv.get_null_bvector();
        if (bv_null_arg)
            bv_null->copy_range(*bv_null_arg, left, right);
        // ...
    }
    else
        plains = this->plains();

    for (unsigned j = 0; j < plains; ++j)
    {
        const bvector_type* arg_bv = sv.bmatr_.row(j);
        if (arg_bv)
        {
            bvector_type* bv = this->bmatr_.get_row(j);
            if (!bv)
                bv = this->get_plain(j);
            bv->copy_range(*arg_bv, left, right);
        }
    } // for j
    this->resize(sv.size());
}

//---------------------------------------------------------------------

template<class Val, class BV>
void sparse_vector<Val, BV>::filter(const bvector_type& bv_mask)
{
    bvector_type* bv_null = this->get_null_bvect();
    unsigned plains;
    if (bv_null)
        plains = this->stored_plains(); // NULL plain is filtered as well
    else
        plains = this->plains();

    for (unsigned j = 0; j < plains; ++j)
    {
        bvector_type* bv = this->bmatr_.get_row(j);
        if (bv)
            bv->bit_and(bv_mask);
    } // for j
}
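/*
    Usage sketch (illustrative; not part of the original header): filter()
    keeps only the elements whose positions are set in the mask bit-vector,
    copy_range() copies a closed interval from another vector.  Values are
    example assumptions.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
        svector_u32 sv;
        for (unsigned i = 0; i < 10; ++i)
            sv.push_back(i + 1);

        bm::bvector<> bv_mask;
        bv_mask.set(2);
        bv_mask.set(5);
        sv.filter(bv_mask);             // only positions 2 and 5 keep their values

        svector_u32 sv_copy;
        sv_copy.copy_range(sv, 0, 5);   // copy the closed interval [0..5]
*/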

//---------------------------------------------------------------------

template<class Val, class BV>
int sparse_vector<Val, BV>::compare(size_type idx, const value_type val) const
{
    value_type sv_value = get(idx);
    return (sv_value > val) - (sv_value < val);
}

template<class Val, class BV>
bool sparse_vector<Val, BV>::equal(const sparse_vector<Val, BV>& sv,
                                   bm::null_support null_able) const
{
    return parent_type::equal(sv, null_able);
}

//---------------------------------------------------------------------

template<class Val, class BV>
typename sparse_vector<Val, BV>::const_iterator
sparse_vector<Val, BV>::begin() const
{
    typedef typename sparse_vector<Val, BV>::const_iterator it_type;
    return it_type(this);
}

//---------------------------------------------------------------------
// ... (one member function elided)
//---------------------------------------------------------------------
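/*
    Usage sketch (illustrative; not part of the original header): compare()
    returns a negative / zero / positive ordering of element idx against a
    constant; equal() compares whole vectors.  Values are example assumptions.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
        svector_u32 sv1, sv2;
        sv1.push_back(10);
        sv2.push_back(10);

        int r = sv1.compare(0, 25u);      // negative: sv1[0] < 25
        bool same = sv1.equal(sv2);       // true
*/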

//---------------------------------------------------------------------
//  const_iterator
//---------------------------------------------------------------------

template<class Val, class BV>
sparse_vector<Val, BV>::const_iterator::const_iterator()
: sv_(0), pos_(bm::id_max), buf_ptr_(0)
{}

template<class Val, class BV>
sparse_vector<Val, BV>::const_iterator::const_iterator(
    const typename sparse_vector<Val, BV>::const_iterator& it)
: sv_(it.sv_), pos_(it.pos_), buf_ptr_(0)
{}

template<class Val, class BV>
sparse_vector<Val, BV>::const_iterator::const_iterator(
    const sparse_vector<Val, BV>* sv)
: sv_(sv), buf_ptr_(0)
{
    // ... (position at the first element, or at the end if the vector is empty)
}

template<class Val, class BV>
sparse_vector<Val, BV>::const_iterator::const_iterator(
    const sparse_vector<Val, BV>* sv, size_type pos)
: sv_(sv), buf_ptr_(0)
{
    // ... (re-position to pos)
}

//---------------------------------------------------------------------
// ... (go_to() / invalidate() elided)
//---------------------------------------------------------------------

template<class Val, class BV>
void sparse_vector<Val, BV>::const_iterator::advance()
{
    ++pos_;
    if (pos_ >= sv_->size())
        this->invalidate();
    else if (buf_ptr_)
    {
        ++buf_ptr_;
        if (buf_ptr_ - ((value_type*)buffer_.data()) >= n_buf_size)
            buf_ptr_ = 0; // buffered window exhausted, re-extract on next value()
    }
}

template<class Val, class BV>
typename sparse_vector<Val, BV>::value_type
sparse_vector<Val, BV>::const_iterator::value() const
{
    value_type v;
    if (!buf_ptr_) // decode the next window of values into the local buffer
    {
        buffer_.reserve(n_buf_size * sizeof(value_type));
        buf_ptr_ = (value_type*)(buffer_.data());
        sv_->extract(buf_ptr_, n_buf_size, pos_, true, &pool_);
    }
    v = *buf_ptr_;
    return v;
}

template<class Val, class BV>
void sparse_vector<Val, BV>::const_iterator::skip_zero_values()
{
    value_type v = value();
    if (buf_ptr_)
    {
        value_type* buf_end = ((value_type*)buffer_.data()) + n_buf_size;
        while (!v) // scan forward inside the decoded buffer
        {
            ++pos_;
            if (++buf_ptr_ < buf_end)
                v = *buf_ptr_;
            else
                break;
        }
        if (pos_ >= sv_->size())
        {
            this->invalidate();
            return;
        }
        if (buf_ptr_ >= buf_end)
            buf_ptr_ = 0;
    }
}

//---------------------------------------------------------------------
// ... (one more member function elided)
//---------------------------------------------------------------------
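/*
    Usage sketch (illustrative; not part of the original header): const
    iteration.  The iterator decodes values in buffered windows (n_buf_size
    elements at a time) via extract(), so sequential scans are much faster
    than per-element get().  Names are example assumptions.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
        svector_u32 sv;
        for (unsigned i = 0; i < 1000; ++i)
            sv.push_back(i);

        svector_u32::const_iterator it = sv.begin();
        svector_u32::const_iterator it_end = sv.end();
        unsigned long long sum = 0;
        for (; it != it_end; ++it)
            sum += *it;

        it.go_to(500);              // random re-positioning
        unsigned v = it.valid() ? it.value() : 0;
*/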

//---------------------------------------------------------------------
//  back_insert_iterator
//---------------------------------------------------------------------

template<class Val, class BV>
sparse_vector<Val, BV>::back_insert_iterator::back_insert_iterator()
: sv_(0), bv_null_(0), buf_ptr_(0)
{}

template<class Val, class BV>
sparse_vector<Val, BV>::back_insert_iterator::back_insert_iterator(
    sparse_vector<Val, BV>* sv)
: sv_(sv), buf_ptr_(0)
{
    bv_null_ = sv_ ? sv_->get_null_bvect() : 0;
}

template<class Val, class BV>
sparse_vector<Val, BV>::back_insert_iterator::back_insert_iterator(
    const typename sparse_vector<Val, BV>::back_insert_iterator& bi)
: sv_(bi.sv_), bv_null_(bi.bv_null_), buf_ptr_(0)
{}

template<class Val, class BV>
sparse_vector<Val, BV>::back_insert_iterator::~back_insert_iterator()
{
    this->flush(); // commit whatever is still buffered
}

template<class Val, class BV>
void sparse_vector<Val, BV>::back_insert_iterator::add(value_type v)
{
    size_type buf_idx = this->add_value(v);
    if (bv_null_) // mark the new element as assigned (not NULL)
    {
        size_type sz = sv_->size();
        bv_null_->set_bit_no_check(sz + buf_idx);
    }
}

template<class Val, class BV>
typename sparse_vector<Val, BV>::size_type
sparse_vector<Val, BV>::back_insert_iterator::add_value(value_type v)
{
    if (!buf_ptr_) // first use: allocate the accumulation buffer
    {
        buffer_.reserve(n_buf_size * sizeof(value_type));
        buf_ptr_ = (value_type*)(buffer_.data());
    }
    else if (buf_ptr_ - ((value_type*)buffer_.data()) >= n_buf_size)
    {
        this->flush(); // buffer full: import it into the vector
        buf_ptr_ = (value_type*)(buffer_.data());
    }
    size_type buf_idx = size_type(buf_ptr_ - ((value_type*)buffer_.data()));
    *buf_ptr_++ = v;
    return buf_idx;
}

//---------------------------------------------------------------------
// ... (single-element add_null() elided)
//---------------------------------------------------------------------

template<class Val, class BV>
void sparse_vector<Val, BV>::back_insert_iterator::add_null(size_type count)
{
    // small counts are buffered one by one, large counts are flushed and
    // delegated to the vector (exact threshold elided in this excerpt)
    if (count < n_buf_size)
    {
        for (size_type i = 0; i < count; ++i)
            this->add_null();
    }
    else
    {
        this->flush();
        sv_->push_back_null(count);
    }
}

template<class Val, class BV>
bool sparse_vector<Val, BV>::back_insert_iterator::empty() const
{
    return (!buf_ptr_ || !sv_);
}

template<class Val, class BV>
void sparse_vector<Val, BV>::back_insert_iterator::flush()
{
    if (this->empty())
        return;
    value_type* d = (value_type*)buffer_.data();
    sv_->import_back(d, size_type(buf_ptr_ - d));
    buf_ptr_ = 0;
}
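/*
    Usage sketch (illustrative; not part of the original header): buffered
    back insertion.  The inserter accumulates values in a local buffer and
    flushes them through import_back(), which is faster than per-element
    push_back(); flush() commits the tail (in this sketch the iterator also
    flushes on destruction).  Names and values are example assumptions.

        typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
        svector_u32 sv(bm::use_null);

        svector_u32::back_insert_iterator bit = sv.get_back_inserter();
        for (unsigned i = 0; i < 100000; ++i)
            bit = i;                // operator=(value_type) pushes a value
        bit.add_null();             // append one unassigned (NULL) element
        bit.flush();                // commit the remaining buffered values

        sv.optimize();              // recommended after a bulk load
*/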
std::input_iterator_tag iterator_category
back_insert_iterator & operator++(int)
noop
value_type at(size_type idx) const
access specified element with bounds checking
size_type effective_vector_max() const
Always 1 (non-matrix type)
void clear_bit_no_check(size_type n)
Clears bit n without precondition checks.
void filter(const bvector_type &bv_mask)
Apply value filter, defined by mask vector.
bool is_nullable() const
check if container supports NULL(unassigned) values
bm::basic_bmatrix< BV > bmatrix_type
back_insert_iterator & operator=(value_type v)
push value to the vector
void erase(size_type idx)
erase specified element from container
bvector_type * bvector_type_ptr
unsigned char * init_remap_buffer()
friend back_insert_iterator
Base class for bit-transposed sparse vector construction.
const unsigned set_word_shift
const value_type & const_reference
sparse_vector_type::value_type value_type
void optimize_gap_size()
Optimize sizes of GAP blocks.
const unsigned set_array_shift
back_insert_iterator & operator*()
noop
bm::byte_buffer< allocator_type > buffer_type
void optimize(bm::word_t *temp_block=0, typename bvector_type::optmode opt_mode=bvector_type::opt_compress, typename sparse_vector< Val, BV >::statistics *stat=0)
run memory optimization for all vector plains
bool valid() const
Returns true if iterator is at a valid position.
void insert(size_type idx, value_type v)
insert specified element into container
bool is_null() const
Get NULL status.
void add(value_type v)
add value to the container
Rank-Select compressed sparse vector.
sparse_vector< Val, BV > & merge(sparse_vector< Val, BV > &sv)
merge with another sparse vector using OR operation. Merge is different from join() because it borrows data from the source vector, so the source gets modified.
basic bit-matrix class and utilities
void add_null()
add NULL (no-value) to the container
static bool find_rank(size_type rank, size_type &pos)
find position of compressed element by its rank
pre-processor un-defines to avoid global space pollution (internal)
const bvector_type * row(size_type i) const
void resize_internal(size_type sz)
algorithms for sparse_vector scan/search
size_type decode(value_type *arr, size_type idx_from, size_type dec_size, bool zero_mem=true) const
Bulk export list of elements to a C-style array.
bool inc(size_type n)
Increment the specified element.
sparse_vector_type::size_type size_type
Back insert iterator implements buffered insert, faster than generic access assignment.
static bool is_compressed()
trait if sparse vector is "compressed" (false)
void xor_swap(W &x, W &y)
XOR swap two scalar variables.
bvector_type::allocator_type allocator_type
back_insert_iterator & operator=(const back_insert_iterator &bi)
void inc(size_type idx)
increment specified element by one
compress blocks when possible (GAP/prefix sum)
void insert_value_no_null(size_type idx, value_type v)
insert value without checking boundaries or support of NULL
const bvector_type * get_null_bvector() const
Get bit-vector of assigned values or NULL (if not constructed that way)
void go_to(size_type pos)
re-position to a specified position
sparse_vector< Val, BV > & join(const sparse_vector< Val, BV > &sv)
join all with another sparse vector using OR operation
do not support NULL values
void set_value_no_null(size_type idx, value_type v)
set value without checking boundaries or support of NULL
Statistical information about bitset's memory allocation details.
Algorithms for bvector<> (main include)
bool operator!=(const const_iterator &it) const
void optimize_gap_size()
Optimize sizes of GAP blocks.
void set_allocator_pool(allocator_pool_type *pool_ptr)
Set allocator pool for local (non-threaded) memory cyclic (lots of alloc-free ops) operations...
unsigned idx_arr_block_lookup_u32(const unsigned *idx, unsigned size, unsigned nb, unsigned start)
block boundaries look ahead U32
void import_back(const value_type *arr, size_type arr_size)
Import list of elements from a C-style array (pushed back)
void push_back_null(size_type count)
push back specified amount of NULL values
bvector_type::enumerator bvector_enumerator_type
sparse vector with runtime compression using bit transposition method
null_support
NULL-able value support.
blocks_manager_type::block_idx_type block_idx_type
unsigned bit_scan_reverse(T value)
void copy_range(const bvector< Alloc > &bvect, size_type left, size_type right)
Copy all bits in the specified closed interval [left,right].
input set is sorted (ascending order)
bvector_type::allocator_type::allocator_pool_type allocator_pool_type
int compare(size_type idx, const value_type val) const
Compare vector element with argument.
void flush()
flush the accumulated buffer
sort_order
Sort order declaration.
bvector< Alloc > & set(size_type n, bool val=true)
Sets bit n if val is true, clears bit n if val is false.
#define IS_FULL_BLOCK(addr)
size_type pos() const
Current position (index) in the vector.
size_type extract(value_type *arr, size_type size, size_type offset=0, bool zero_mem=true, allocator_pool_type *pool_ptr=0) const
Bulk export list of elements to a C-style array.
void for_each_bit(const BV &bv, Func &bit_functor)
bit-vector visitor scanner to traverse each 1 bit using C++ visitor
const_iterator & operator++(int)
Advance to the next available value.
void push_back(value_type v)
push value back into vector
bvector_type::allocator_type::allocator_pool_type allocator_pool_type
const_iterator begin() const
Provide const iterator access to container content.
bvector_type::allocation_policy allocation_policy_type
support "non-assigned" or "NULL" logic
Basic dense bit-matrix class.
void resize(size_type new_size)
unsigned gap_bfind(const T *buf, unsigned pos, unsigned *is_set)
sparse_vector< Val, BV > sparse_vector_type
size_type extract_plains(value_type *arr, size_type size, size_type offset, bool zero_mem=true) const
extract medium window without use of masking vector
bool operator==(const const_iterator &it) const
unsigned gap_test_unr(const T *buf, const unsigned pos)
Tests if bit = pos is true. Analog of bm::gap_test with SIMD unrolling.
reference operator[](size_type idx)
Operator to get write access to an element.
bool empty() const
return true if vector is empty
sparse_vector< Val, BV > & clear_range(size_type left, size_type right, bool set_null=false)
clear range (assign bit 0 for all plains)
bool empty() const
return true if insertion buffer is empty
sparse_vector_type::bvector_type bvector_type
void resize(size_type sz)
resize vector
value_type value() const
Get current position (value)
void sync(bool)
synchronize internal structures
const unsigned set_array_mask
size_type add_value(value_type v)
add value to the buffer without changing the NULL vector
unsigned short gap_word_t
sparse_vector< Val, BV > sparse_vector_type
back_insert_iterator get_back_inserter()
Provide back insert iterator. Back insert iterator implements buffered insertion, which is faster...
sparse_vector_type::value_type value_type
bvector_type::block_idx_type block_idx_type
size_type extract_range(value_type *arr, size_type size, size_type offset, bool zero_mem=true) const
extract small window without use of masking vector
sparse_vector_type * sparse_vector_type_ptr
size_t remap_size() const
BV::allocator_type allocator_type
static size_type translate_address(size_type i)
address translation for this type of container
bvector< Alloc > & set_range(size_type left, size_type right, bool value=true)
Sets all bits in the specified closed interval [left,right]. Interval must be inside the bvector's siz...
bvector_type::allocator_type allocator_type
void set_null(size_type idx)
set specified element to unassigned value (NULL)
bm::id64_t idx_arr_block_lookup_u64(const bm::id64_t *idx, bm::id64_t size, bm::id64_t nb, bm::id64_t start)
block boundaries look ahead U64
void reset()
Reset statistics.
sparse_vector_type::bvector_type bvector_type
bool is_null(size_type idx) const
test if specified element is NULL
size_type gather(value_type *arr, const size_type *idx, size_type size, bm::sort_order sorted_idx) const
Gather elements to a C-style array.
allocator_type::allocator_pool_type allocator_pool_type
const unsigned char * get_remap_buffer() const
value_type operator[](size_type idx) const
get specified element without bounds checking
const_iterator & operator++()
Advance to the next available value.
void copy_from(const base_sparse_vector< Val, BV, MAX_SIZE > &bsv)
#define BM_ASSERT_THROW(x, xerrcode)
Serialize sparse vector into a memory buffer(s) structure.
reference(sparse_vector< Val, BV > &sv, size_type idx) BMNOEXEPT
static void throw_range_error(const char *err_msg)
throw range error
back_insert_iterator & operator++()
noop
const bvector_type * bvector_type_const_ptr
Utilities for bit transposition (internal) (experimental!)
void merge(bm::bvector< Alloc > &bvect)
Merge/move content from another vector.
void copy_range(const sparse_vector< Val, BV > &sv, size_type left, size_type right)
copy range of values from another sparse vector
void insert_value(size_type idx, value_type v)
insert value without checking boundaries
void calc_stat(struct sparse_vector< Val, BV >::statistics *st) const
Calculates memory statistics.
size_type size() const
return size of the vector
std::output_iterator_tag iterator_category
const unsigned set_block_mask
void clear() BMNOEXEPT
resize to zero, free memory
bvector_type_const_ptr get_row(size_type i) const
Constant iterator designed to enumerate "ON" bits.
optmode
Optimization mode. Every next level means additional checks (better compression vs time)...
allocator_type::allocator_pool_type allocator_pool_type
Const iterator to traverse the sparse vector.
bool equal(const sparse_vector< Val, BV > &sv, bm::null_support null_able=bm::use_null) const
check if another sparse vector has the same content and size
~sparse_vector() BMNOEXEPT
base_sparse_vector< Val, BV, 1 > parent_type
sparse vector de-serializer
reference & operator=(const reference &ref)
const unsigned set_word_mask
Reference class to access elements via common [] operator.
bm::bvector< Alloc > & bit_and(const bm::bvector< Alloc > &bv1, const bm::bvector< Alloc > &bv2, typename bm::bvector< Alloc >::optmode opt_mode)
3-operand AND : this := bv1 AND bv2
bool operator==(const reference &ref) const
void invalidate()
Invalidate current iterator.
bvector_type::size_type size_type
void advance()
advance iterator forward by one
const_iterator get_const_iterator(size_type idx) const
Get const_iterator re-positioned to a specific element.
size_type size_internal() const
unsigned short bitscan(V w, B *bits)
reference & operator=(value_type val)
void push_back_no_null(value_type v)
push value back into vector without NULL semantics
void set_allocator_pool(allocator_pool_type *pool_ptr)
Set allocator pool for local (non-threaded) memory cyclic (lots of alloc-free ops) operations...
void set_value(size_type idx, value_type v)
set value without checking boundaries
value_type operator*() const
Get current position (value)
bmatrix_type bmatr_
bit-transposed matrix
Structure with statistical information about memory allocation footprint, serialization projection...
const unsigned set_block_shift
void add(const bv_statistics &st)
Sum data from another structure.
const_iterator end() const
Provide const iterator access to the end.
Mini-matrix for bit transposition purposes.
size_type effective_size() const
size of sparse vector (may be different for RSC)
void bit_block_gather_scatter(unsigned *arr, const bm::word_t *blk, const unsigned *idx, unsigned size, unsigned start, unsigned bit_idx)
bit index to word gather-scatter algorithm (SIMD)
sparse_vector_type * sparse_vector_type_ptr
const T * row(unsigned row_idx) const
static void throw_bad_alloc()
throw bad alloc
void set_bit_no_check(size_type n)
Set bit without checking preconditions (size, etc)
bm::byte_buffer< allocator_type > buffer_type
size_type size_
array size
#define FULL_BLOCK_FAKE_ADDR
sorted and in one block (internal!)
sparse_vector_type::size_type size_type
bool insert(size_type n, bool value)
Insert bit into specified position. All the vector content after the insert position is shifted right...
sparse_vector(bm::null_support null_able=bm::no_null, allocation_policy_type ap=allocation_policy_type(), size_type bv_max_size=bm::id_max, const allocator_type &alloc=allocator_type())
Sparse vector constructor.
void swap(sparse_vector< Val, BV > &sv) BMNOEXEPT
content exchange
sparse_vector< Val, BV > & operator=(const sparse_vector< Val, BV > &sv)