Python scipy.sparse.indices() Examples
The following are 17 code examples of `scipy.sparse.indices()` (in practice, the `indices` attribute of scipy CSR/CSC sparse matrices).
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module `scipy.sparse`, or try the search function.
Example #1
Source File: word2vecReaderUtils.py From word2vec-twitter with MIT License | 5 votes |
def run(self):
    """Thread entry point: stream documents from `self.corpus` into the
    queue `self.q` in chunks of `self.chunksize`, then push a `None`
    sentinel when the corpus is exhausted.
    """
    if self.as_numpy:
        import numpy  # don't clutter the global namespace with a dependency on numpy
    it = iter(self.corpus)
    while True:
        # lazily take the next `chunksize` documents from the corpus iterator
        chunk = itertools.islice(it, self.chunksize)
        if self.as_numpy:
            # HACK XXX convert documents to numpy arrays, to save memory.
            # This also gives a scipy warning at runtime:
            # "UserWarning: indices array has non-integer dtype (float64)"
            wrapped_chunk = [[numpy.asarray(doc) for doc in chunk]]
        else:
            wrapped_chunk = [list(chunk)]
        if not wrapped_chunk[0]:
            # empty chunk => corpus exhausted; signal the consumer and stop
            self.q.put(None, block=True)
            break
        try:
            qsize = self.q.qsize()
        except NotImplementedError:
            # some queue implementations do not support qsize()
            qsize = '?'
        logger.debug("prepared another chunk of %i documents (qsize=%s)" %
                     (len(wrapped_chunk[0]), qsize))
        # pop() hands the inner list to the queue without keeping a local reference
        self.q.put(wrapped_chunk.pop(), block=True)
#endclass InputQueue
Example #2
Source File: utils.py From xlinkBook with MIT License | 5 votes |
def run(self):
    """Thread entry point: stream documents from `self.corpus` into the
    queue `self.q` in chunks of `self.chunksize`, then push a `None`
    sentinel when the corpus is exhausted.
    """
    if self.as_numpy:
        import numpy  # don't clutter the global namespace with a dependency on numpy
    it = iter(self.corpus)
    while True:
        # lazily take the next `chunksize` documents from the corpus iterator
        chunk = itertools.islice(it, self.chunksize)
        if self.as_numpy:
            # HACK XXX convert documents to numpy arrays, to save memory.
            # This also gives a scipy warning at runtime:
            # "UserWarning: indices array has non-integer dtype (float64)"
            wrapped_chunk = [[numpy.asarray(doc) for doc in chunk]]
        else:
            wrapped_chunk = [list(chunk)]
        if not wrapped_chunk[0]:
            # empty chunk => corpus exhausted; signal the consumer and stop
            self.q.put(None, block=True)
            break
        try:
            qsize = self.q.qsize()
        except NotImplementedError:
            # some queue implementations do not support qsize()
            qsize = '?'
        logger.debug("prepared another chunk of %i documents (qsize=%s)" %
                     (len(wrapped_chunk[0]), qsize))
        # pop() hands the inner list to the queue without keeping a local reference
        self.q.put(wrapped_chunk.pop(), block=True)
#endclass InputQueue
Example #3
Source File: utils.py From xlinkBook with MIT License | 5 votes |
def load(cls, fname, mmap=None): """ Load a previously saved object from file (also see `save`). If the object was saved with large arrays stored separately, you can load these arrays via mmap (shared memory) using `mmap='r'`. Default: don't use mmap, load large arrays as normal objects. """ #logger.info("loading %s object from %s" % (cls.__name__, fname)) subname = lambda suffix: fname + '.' + suffix + '.npy' obj = unpickle(fname) for attrib in getattr(obj, '__numpys', []): logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap)) setattr(obj, attrib, numpy.load(subname(attrib), mmap_mode=mmap)) for attrib in getattr(obj, '__scipys', []): logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap)) sparse = unpickle(subname(attrib)) sparse.data = numpy.load(subname(attrib) + '.data.npy', mmap_mode=mmap) sparse.indptr = numpy.load(subname(attrib) + '.indptr.npy', mmap_mode=mmap) sparse.indices = numpy.load(subname(attrib) + '.indices.npy', mmap_mode=mmap) setattr(obj, attrib, sparse) for attrib in getattr(obj, '__ignoreds', []): #logger.info("setting ignored attribute %s to None" % (attrib)) setattr(obj, attrib, None) return obj
Example #4
Source File: utils.py From topical_word_embeddings with MIT License | 5 votes |
def load(cls, fname, mmap=None):
    """
    Load a previously saved object from file (also see `save`).

    If the object was saved with large arrays stored separately, you can load
    these arrays via mmap (shared memory) using `mmap='r'`.
    Default: don't use mmap, load large arrays as normal objects.
    """
    logger.info("loading %s object from %s" % (cls.__name__, fname))
    # path of the side file that stores the separately-saved attribute `suffix`
    subname = lambda suffix: fname + '.' + suffix + '.npy'
    obj = unpickle(fname)
    # restore plain numpy arrays that were saved out-of-band
    for attrib in getattr(obj, '__numpys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        setattr(obj, attrib, numpy.load(subname(attrib), mmap_mode=mmap))
    # restore scipy sparse matrices: a pickled shell plus three .npy side files
    for attrib in getattr(obj, '__scipys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        sparse = unpickle(subname(attrib))
        sparse.data = numpy.load(subname(attrib) + '.data.npy', mmap_mode=mmap)
        sparse.indptr = numpy.load(subname(attrib) + '.indptr.npy', mmap_mode=mmap)
        sparse.indices = numpy.load(subname(attrib) + '.indices.npy', mmap_mode=mmap)
        setattr(obj, attrib, sparse)
    # attributes deliberately excluded from saving come back as None
    for attrib in getattr(obj, '__ignoreds', []):
        logger.info("setting ignored attribute %s to None" % (attrib))
        setattr(obj, attrib, None)
    return obj
Example #5
Source File: utils.py From topical_word_embeddings with MIT License | 5 votes |
def run(self):
    """Thread entry point: stream documents from `self.corpus` into the
    queue `self.q` in chunks of `self.chunksize`, then push a `None`
    sentinel when the corpus is exhausted.
    """
    if self.as_numpy:
        import numpy  # don't clutter the global namespace with a dependency on numpy
    it = iter(self.corpus)
    while True:
        # lazily take the next `chunksize` documents from the corpus iterator
        chunk = itertools.islice(it, self.chunksize)
        if self.as_numpy:
            # HACK XXX convert documents to numpy arrays, to save memory.
            # This also gives a scipy warning at runtime:
            # "UserWarning: indices array has non-integer dtype (float64)"
            wrapped_chunk = [[numpy.asarray(doc) for doc in chunk]]
        else:
            wrapped_chunk = [list(chunk)]
        if not wrapped_chunk[0]:
            # empty chunk => corpus exhausted; signal the consumer and stop
            self.q.put(None, block=True)
            break
        try:
            qsize = self.q.qsize()
        except NotImplementedError:
            # some queue implementations do not support qsize()
            qsize = '?'
        logger.debug("prepared another chunk of %i documents (qsize=%s)" %
                     (len(wrapped_chunk[0]), qsize))
        # pop() hands the inner list to the queue without keeping a local reference
        self.q.put(wrapped_chunk.pop(), block=True)
#endclass InputQueue
Example #6
Source File: utils.py From topical_word_embeddings with MIT License | 5 votes |
def load(cls, fname, mmap=None):
    """
    Load a previously saved object from file (also see `save`).

    If the object was saved with large arrays stored separately, you can load
    these arrays via mmap (shared memory) using `mmap='r'`.
    Default: don't use mmap, load large arrays as normal objects.
    """
    logger.info("loading %s object from %s" % (cls.__name__, fname))
    # path of the side file that stores the separately-saved attribute `suffix`
    subname = lambda suffix: fname + '.' + suffix + '.npy'
    obj = unpickle(fname)
    # restore plain numpy arrays that were saved out-of-band
    for attrib in getattr(obj, '__numpys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        setattr(obj, attrib, numpy.load(subname(attrib), mmap_mode=mmap))
    # restore scipy sparse matrices: a pickled shell plus three .npy side files
    for attrib in getattr(obj, '__scipys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        sparse = unpickle(subname(attrib))
        sparse.data = numpy.load(subname(attrib) + '.data.npy', mmap_mode=mmap)
        sparse.indptr = numpy.load(subname(attrib) + '.indptr.npy', mmap_mode=mmap)
        sparse.indices = numpy.load(subname(attrib) + '.indices.npy', mmap_mode=mmap)
        setattr(obj, attrib, sparse)
    # attributes deliberately excluded from saving come back as None
    for attrib in getattr(obj, '__ignoreds', []):
        logger.info("setting ignored attribute %s to None" % (attrib))
        setattr(obj, attrib, None)
    return obj
Example #7
Source File: utils.py From topical_word_embeddings with MIT License | 5 votes |
def run(self):
    """Thread entry point: stream documents from `self.corpus` into the
    queue `self.q` in chunks of `self.chunksize`, then push a `None`
    sentinel when the corpus is exhausted.
    """
    if self.as_numpy:
        import numpy  # don't clutter the global namespace with a dependency on numpy
    it = iter(self.corpus)
    while True:
        # lazily take the next `chunksize` documents from the corpus iterator
        chunk = itertools.islice(it, self.chunksize)
        if self.as_numpy:
            # HACK XXX convert documents to numpy arrays, to save memory.
            # This also gives a scipy warning at runtime:
            # "UserWarning: indices array has non-integer dtype (float64)"
            wrapped_chunk = [[numpy.asarray(doc) for doc in chunk]]
        else:
            wrapped_chunk = [list(chunk)]
        if not wrapped_chunk[0]:
            # empty chunk => corpus exhausted; signal the consumer and stop
            self.q.put(None, block=True)
            break
        try:
            qsize = self.q.qsize()
        except NotImplementedError:
            # some queue implementations do not support qsize()
            qsize = '?'
        logger.debug("prepared another chunk of %i documents (qsize=%s)" %
                     (len(wrapped_chunk[0]), qsize))
        # pop() hands the inner list to the queue without keeping a local reference
        self.q.put(wrapped_chunk.pop(), block=True)
#endclass InputQueue
Example #8
Source File: utils.py From topical_word_embeddings with MIT License | 5 votes |
def load(cls, fname, mmap=None):
    """
    Load a previously saved object from file (also see `save`).

    If the object was saved with large arrays stored separately, you can load
    these arrays via mmap (shared memory) using `mmap='r'`.
    Default: don't use mmap, load large arrays as normal objects.
    """
    logger.info("loading %s object from %s" % (cls.__name__, fname))
    # path of the side file that stores the separately-saved attribute `suffix`
    subname = lambda suffix: fname + '.' + suffix + '.npy'
    obj = unpickle(fname)
    # restore plain numpy arrays that were saved out-of-band
    for attrib in getattr(obj, '__numpys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        setattr(obj, attrib, numpy.load(subname(attrib), mmap_mode=mmap))
    # restore scipy sparse matrices: a pickled shell plus three .npy side files
    for attrib in getattr(obj, '__scipys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        sparse = unpickle(subname(attrib))
        sparse.data = numpy.load(subname(attrib) + '.data.npy', mmap_mode=mmap)
        sparse.indptr = numpy.load(subname(attrib) + '.indptr.npy', mmap_mode=mmap)
        sparse.indices = numpy.load(subname(attrib) + '.indices.npy', mmap_mode=mmap)
        setattr(obj, attrib, sparse)
    # attributes deliberately excluded from saving come back as None
    for attrib in getattr(obj, '__ignoreds', []):
        logger.info("setting ignored attribute %s to None" % (attrib))
        setattr(obj, attrib, None)
    return obj
Example #9
Source File: utils.py From category2vec with GNU Lesser General Public License v3.0 | 5 votes |
def run(self):
    """Thread entry point: stream documents from `self.corpus` into the
    queue `self.q` in chunks of `self.chunksize`, then push a `None`
    sentinel when the corpus is exhausted.
    """
    if self.as_numpy:
        import numpy  # don't clutter the global namespace with a dependency on numpy
    it = iter(self.corpus)
    while True:
        # lazily take the next `chunksize` documents from the corpus iterator
        chunk = itertools.islice(it, self.chunksize)
        if self.as_numpy:
            # HACK XXX convert documents to numpy arrays, to save memory.
            # This also gives a scipy warning at runtime:
            # "UserWarning: indices array has non-integer dtype (float64)"
            wrapped_chunk = [[numpy.asarray(doc) for doc in chunk]]
        else:
            wrapped_chunk = [list(chunk)]
        if not wrapped_chunk[0]:
            # empty chunk => corpus exhausted; signal the consumer and stop
            self.q.put(None, block=True)
            break
        try:
            qsize = self.q.qsize()
        except NotImplementedError:
            # some queue implementations do not support qsize()
            qsize = '?'
        logger.debug("prepared another chunk of %i documents (qsize=%s)" %
                     (len(wrapped_chunk[0]), qsize))
        # pop() hands the inner list to the queue without keeping a local reference
        self.q.put(wrapped_chunk.pop(), block=True)
#endclass InputQueue
Example #10
Source File: utils.py From category2vec with GNU Lesser General Public License v3.0 | 5 votes |
def load(cls, fname, mmap=None):
    """
    Load a previously saved object from file (also see `save`).

    If the object was saved with large arrays stored separately, you can load
    these arrays via mmap (shared memory) using `mmap='r'`.
    Default: don't use mmap, load large arrays as normal objects.
    """
    logger.info("loading %s object from %s" % (cls.__name__, fname))
    # path of the side file that stores the separately-saved attribute `suffix`
    subname = lambda suffix: fname + '.' + suffix + '.npy'
    obj = unpickle(fname)
    # restore plain numpy arrays that were saved out-of-band
    for attrib in getattr(obj, '__numpys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        setattr(obj, attrib, numpy.load(subname(attrib), mmap_mode=mmap))
    # restore scipy sparse matrices: a pickled shell plus three .npy side files
    for attrib in getattr(obj, '__scipys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        sparse = unpickle(subname(attrib))
        sparse.data = numpy.load(subname(attrib) + '.data.npy', mmap_mode=mmap)
        sparse.indptr = numpy.load(subname(attrib) + '.indptr.npy', mmap_mode=mmap)
        sparse.indices = numpy.load(subname(attrib) + '.indices.npy', mmap_mode=mmap)
        setattr(obj, attrib, sparse)
    # attributes deliberately excluded from saving come back as None
    for attrib in getattr(obj, '__ignoreds', []):
        logger.info("setting ignored attribute %s to None" % (attrib))
        setattr(obj, attrib, None)
    return obj
Example #11
Source File: utils.py From topical_word_embeddings with MIT License | 5 votes |
def load(cls, fname, mmap=None):
    """
    Load a previously saved object from file (also see `save`).

    If the object was saved with large arrays stored separately, you can load
    these arrays via mmap (shared memory) using `mmap='r'`.
    Default: don't use mmap, load large arrays as normal objects.
    """
    logger.info("loading %s object from %s" % (cls.__name__, fname))
    # path of the side file that stores the separately-saved attribute `suffix`
    subname = lambda suffix: fname + '.' + suffix + '.npy'
    obj = unpickle(fname)
    # restore plain numpy arrays that were saved out-of-band
    for attrib in getattr(obj, '__numpys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        setattr(obj, attrib, numpy.load(subname(attrib), mmap_mode=mmap))
    # restore scipy sparse matrices: a pickled shell plus three .npy side files
    for attrib in getattr(obj, '__scipys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        sparse = unpickle(subname(attrib))
        sparse.data = numpy.load(subname(attrib) + '.data.npy', mmap_mode=mmap)
        sparse.indptr = numpy.load(subname(attrib) + '.indptr.npy', mmap_mode=mmap)
        sparse.indices = numpy.load(subname(attrib) + '.indices.npy', mmap_mode=mmap)
        setattr(obj, attrib, sparse)
    # attributes deliberately excluded from saving come back as None
    for attrib in getattr(obj, '__ignoreds', []):
        logger.info("setting ignored attribute %s to None" % (attrib))
        setattr(obj, attrib, None)
    return obj
Example #12
Source File: utils.py From topical_word_embeddings with MIT License | 5 votes |
def run(self):
    """Worker loop: read `self.corpus` in chunks of `self.chunksize`
    documents and feed each chunk into `self.q`, ending with a `None`
    sentinel once the corpus runs dry.
    """
    if self.as_numpy:
        import numpy  # don't clutter the global namespace with a dependency on numpy
    stream = iter(self.corpus)
    while True:
        batch = itertools.islice(stream, self.chunksize)
        if self.as_numpy:
            # HACK XXX convert documents to numpy arrays, to save memory.
            # This also gives a scipy warning at runtime:
            # "UserWarning: indices array has non-integer dtype (float64)"
            payload = [numpy.asarray(document) for document in batch]
        else:
            payload = list(batch)
        if not payload:
            # exhausted: tell the consumer there is nothing more coming
            self.q.put(None, block=True)
            break
        try:
            qsize = self.q.qsize()
        except NotImplementedError:
            qsize = '?'
        logger.debug("prepared another chunk of %i documents (qsize=%s)" %
                     (len(payload), qsize))
        self.q.put(payload, block=True)
#endclass InputQueue
Example #13
Source File: utils.py From topical_word_embeddings with MIT License | 5 votes |
def load(cls, fname, mmap=None):
    """
    Load a previously saved object from file (also see `save`).

    If the object was saved with large arrays stored separately, you can load
    these arrays via mmap (shared memory) using `mmap='r'`.
    Default: don't use mmap, load large arrays as normal objects.
    """
    logger.info("loading %s object from %s" % (cls.__name__, fname))
    # path of the side file that stores the separately-saved attribute `suffix`
    subname = lambda suffix: fname + '.' + suffix + '.npy'
    obj = unpickle(fname)
    # restore plain numpy arrays that were saved out-of-band
    for attrib in getattr(obj, '__numpys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        setattr(obj, attrib, numpy.load(subname(attrib), mmap_mode=mmap))
    # restore scipy sparse matrices: a pickled shell plus three .npy side files
    for attrib in getattr(obj, '__scipys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        sparse = unpickle(subname(attrib))
        sparse.data = numpy.load(subname(attrib) + '.data.npy', mmap_mode=mmap)
        sparse.indptr = numpy.load(subname(attrib) + '.indptr.npy', mmap_mode=mmap)
        sparse.indices = numpy.load(subname(attrib) + '.indices.npy', mmap_mode=mmap)
        setattr(obj, attrib, sparse)
    # attributes deliberately excluded from saving come back as None
    for attrib in getattr(obj, '__ignoreds', []):
        logger.info("setting ignored attribute %s to None" % (attrib))
        setattr(obj, attrib, None)
    return obj
Example #14
Source File: utils.py From topical_word_embeddings with MIT License | 5 votes |
def run(self):
    """Thread entry point: stream documents from `self.corpus` into the
    queue `self.q` in chunks of `self.chunksize`, then push a `None`
    sentinel when the corpus is exhausted.
    """
    if self.as_numpy:
        import numpy  # don't clutter the global namespace with a dependency on numpy
    it = iter(self.corpus)
    while True:
        # lazily take the next `chunksize` documents from the corpus iterator
        chunk = itertools.islice(it, self.chunksize)
        if self.as_numpy:
            # HACK XXX convert documents to numpy arrays, to save memory.
            # This also gives a scipy warning at runtime:
            # "UserWarning: indices array has non-integer dtype (float64)"
            wrapped_chunk = [[numpy.asarray(doc) for doc in chunk]]
        else:
            wrapped_chunk = [list(chunk)]
        if not wrapped_chunk[0]:
            # empty chunk => corpus exhausted; signal the consumer and stop
            self.q.put(None, block=True)
            break
        try:
            qsize = self.q.qsize()
        except NotImplementedError:
            # some queue implementations do not support qsize()
            qsize = '?'
        logger.debug("prepared another chunk of %i documents (qsize=%s)" %
                     (len(wrapped_chunk[0]), qsize))
        # pop() hands the inner list to the queue without keeping a local reference
        self.q.put(wrapped_chunk.pop(), block=True)
#endclass InputQueue
Example #15
Source File: utils.py From topical_word_embeddings with MIT License | 5 votes |
def load(cls, fname, mmap=None):
    """
    Load a previously saved object from file (also see `save`).

    If the object was saved with large arrays stored separately, you can load
    these arrays via mmap (shared memory) using `mmap='r'`.
    Default: don't use mmap, load large arrays as normal objects.
    """
    logger.info("loading %s object from %s" % (cls.__name__, fname))

    def sidecar(suffix):
        # auxiliary .npy file that stores the separately-saved attribute
        return fname + '.' + suffix + '.npy'

    result = unpickle(fname)
    for name in getattr(result, '__numpys', []):
        logger.info("loading %s from %s with mmap=%s" % (name, sidecar(name), mmap))
        setattr(result, name, numpy.load(sidecar(name), mmap_mode=mmap))
    for name in getattr(result, '__scipys', []):
        logger.info("loading %s from %s with mmap=%s" % (name, sidecar(name), mmap))
        matrix = unpickle(sidecar(name))
        base = sidecar(name)
        matrix.data = numpy.load(base + '.data.npy', mmap_mode=mmap)
        matrix.indptr = numpy.load(base + '.indptr.npy', mmap_mode=mmap)
        matrix.indices = numpy.load(base + '.indices.npy', mmap_mode=mmap)
        setattr(result, name, matrix)
    for name in getattr(result, '__ignoreds', []):
        logger.info("setting ignored attribute %s to None" % (name))
        setattr(result, name, None)
    return result
Example #16
Source File: word2vecReaderUtils.py From word2vec-twitter with MIT License | 5 votes |
def load(cls, fname, mmap=None):
    """
    Load a previously saved object from file (also see `save`).

    If the object was saved with large arrays stored separately, you can load
    these arrays via mmap (shared memory) using `mmap='r'`.
    Default: don't use mmap, load large arrays as normal objects.
    """
    logger.info("loading %s object from %s" % (cls.__name__, fname))
    # path of the side file that stores the separately-saved attribute `suffix`
    subname = lambda suffix: fname + '.' + suffix + '.npy'
    obj = unpickle(fname)
    # restore plain numpy arrays that were saved out-of-band
    for attrib in getattr(obj, '__numpys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        setattr(obj, attrib, numpy.load(subname(attrib), mmap_mode=mmap))
    # restore scipy sparse matrices: a pickled shell plus three .npy side files
    for attrib in getattr(obj, '__scipys', []):
        logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
        sparse = unpickle(subname(attrib))
        sparse.data = numpy.load(subname(attrib) + '.data.npy', mmap_mode=mmap)
        sparse.indptr = numpy.load(subname(attrib) + '.indptr.npy', mmap_mode=mmap)
        sparse.indices = numpy.load(subname(attrib) + '.indices.npy', mmap_mode=mmap)
        setattr(obj, attrib, sparse)
    # attributes deliberately excluded from saving come back as None
    for attrib in getattr(obj, '__ignoreds', []):
        logger.info("setting ignored attribute %s to None" % (attrib))
        setattr(obj, attrib, None)
    return obj
Example #17
Source File: utils.py From ohmnet with MIT License | 4 votes |
def _load_specials(self, fname, mmap, compress, subname):
    """
    Loads any attributes that were stored specially, and gives the same
    opportunity to recursively included SaveLoad instances.
    """
    # mmap and compression are mutually exclusive: a compressed archive
    # cannot be memory-mapped, so raise early with a helpful message
    mmap_error = lambda x, y: IOError(
        'Cannot mmap compressed object %s in file %s. ' % (x, y) +
        'Use `load(fname, mmap=None)` or uncompress files manually.')

    # first, let nested SaveLoad attributes restore their own specials
    for attrib in getattr(self, '__recursive_saveloads', []):
        cfname = '.'.join((fname, attrib))
        logger.info("loading %s recursively from %s.* with mmap=%s" % (
            attrib, cfname, mmap))
        getattr(self, attrib)._load_specials(cfname, mmap, compress, subname)
    # restore plain numpy arrays saved to side files
    for attrib in getattr(self, '__numpys', []):
        logger.info("loading %s from %s with mmap=%s" % (
            attrib, subname(fname, attrib), mmap))
        if compress:
            if mmap:
                raise mmap_error(attrib, subname(fname, attrib))
            # compressed saves store the array under the 'val' key of an .npz
            val = numpy.load(subname(fname, attrib))['val']
        else:
            val = numpy.load(subname(fname, attrib), mmap_mode=mmap)
        setattr(self, attrib, val)
    # restore scipy sparse matrices: pickled shell + data/indptr/indices arrays
    for attrib in getattr(self, '__scipys', []):
        logger.info("loading %s from %s with mmap=%s" % (
            attrib, subname(fname, attrib), mmap))
        sparse = unpickle(subname(fname, attrib))
        if compress:
            if mmap:
                raise mmap_error(attrib, subname(fname, attrib))
            # all three constituent arrays live in one compressed .npz archive
            with numpy.load(subname(fname, attrib, 'sparse')) as f:
                sparse.data = f['data']
                sparse.indptr = f['indptr']
                sparse.indices = f['indices']
        else:
            sparse.data = numpy.load(subname(fname, attrib, 'data'), mmap_mode=mmap)
            sparse.indptr = numpy.load(subname(fname, attrib, 'indptr'), mmap_mode=mmap)
            sparse.indices = numpy.load(subname(fname, attrib, 'indices'), mmap_mode=mmap)
        setattr(self, attrib, sparse)
    # attributes deliberately excluded from saving come back as None
    for attrib in getattr(self, '__ignoreds', []):
        logger.info("setting ignored attribute %s to None" % (attrib))
        setattr(self, attrib, None)