Python torch.Storage() Examples
The following are 13 code examples of torch.Storage(), drawn from open-source projects. The source file, project, and license are noted above each example.
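As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below, and written against the classic pre-2.x typed-storage API these projects use): a torch.Storage is a flat, one-dimensional buffer that a tensor can view in place and that can be moved to a GPU with .cuda(). The sizes are arbitrary.

import torch

buf = torch.Storage(1024)          # flat buffer of 1024 float elements, uninitialized
print(len(buf))                    # 1024

# A tensor can view (a prefix of) the buffer without copying: set_() only
# rebinds the tensor's storage, offset, size, and stride.
view = torch.empty(0).set_(buf, 0, (4, 256))
view.zero_()                       # writes through to the shared buffer

if torch.cuda.is_available():      # move the raw buffer to GPU 0
    buf_gpu = buf.cuda(0)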
Example #1
Source File: cu_net_prev_version.py, from CU-Net (Apache License 2.0)
def __init__(self, in_num, out_num, layer_num, max_link, storage_size=1024):
    input_storage_1 = torch.Storage(storage_size)
    input_storage_2 = torch.Storage(storage_size)
    self.shared_allocation_1 = _SharedAllocation(input_storage_1)
    self.shared_allocation_2 = _SharedAllocation(input_storage_2)
    max_in_num = in_num + out_num * max_link
    self.final_num_features = max_in_num
    self.saved_features = []
    self.max_link = max_link
    super(_IntermediaBlock, self).__init__()
    print('creating intermedia block ...')
    self.adapters = []
    for i in range(0, layer_num-1):
        if i < max_link:
            tmp_in_num = in_num + (i+1) * out_num
        else:
            tmp_in_num = max_in_num
        print('intermedia layer %d input channel number is %d' % (i, tmp_in_num))
        self.adapters.append(_EfficientDensenetBottleneck(self.shared_allocation_1,
                                                          self.shared_allocation_2,
                                                          tmp_in_num, out_num))
    self.adapters = nn.ModuleList(self.adapters)
    print('intermedia layer output channel number is %d' % out_num)
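The two torch.Storage buffers here follow the memory-sharing idea behind the efficient-DenseNet implementations: each bottleneck re-views one pre-allocated buffer for its concatenated input instead of materialising a fresh tensor every time. A hedged sketch of that reuse pattern (not CU-Net code; the helper name and sizes are illustrative):

import torch

shared = torch.Storage(1024)                      # starts small, grows on demand

def concat_buffer(shared, num_elements, shape):
    # Grow the shared buffer if this concatenation needs more room, then hand
    # back a zero-copy tensor view of the required shape.
    if shared.size() < num_elements:
        shared.resize_(num_elements)
    return torch.empty(0).set_(shared, 0, shape)

buf = concat_buffer(shared, 2 * 160 * 8 * 8, (2, 160, 8, 8))  # hypothetical N, C, H, W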
Example #2
Source File: cu_net_prev_version_wig.py, from CU-Net (Apache License 2.0)
def __init__(self, in_num, out_num, layer_num, max_link, storage_size=1024):
    input_storage_1 = torch.Storage(storage_size)
    input_storage_2 = torch.Storage(storage_size)
    self.shared_allocation_1 = _SharedAllocation(input_storage_1)
    self.shared_allocation_2 = _SharedAllocation(input_storage_2)
    max_in_num = in_num + out_num * max_link
    self.final_num_features = max_in_num
    self.saved_features = []
    self.max_link = max_link
    super(_IntermediaBlock, self).__init__()
    print('creating intermedia block ...')
    self.adapters = []
    for i in range(0, layer_num-1):
        if i < max_link:
            tmp_in_num = in_num + (i+1) * out_num
        else:
            tmp_in_num = max_in_num
        print('intermedia layer %d input channel number is %d' % (i, tmp_in_num))
        self.adapters.append(_EfficientDensenetBottleneck(self.shared_allocation_1,
                                                          self.shared_allocation_2,
                                                          tmp_in_num, out_num))
    self.adapters = nn.ModuleList(self.adapters)
    print('intermedia layer output channel number is %d' % out_num)
Example #3
Source File: device.py, from netharn (Apache License 2.0)
def _map_location(xpu, storage, location):
    """
    Helper for `xpu.load` used when calling `torch.load`

    Args:
        storage (torch.Storage): the initial deserialization of the storage of
            the data read by `torch.load`, residing on the CPU.
        location (str): tag identifying the location the data being read by
            `torch.load` was originally saved from.

    Returns:
        torch.Storage: the storage
    """
    if xpu.is_gpu():
        return storage.cuda(xpu._main_device_id)
    else:
        return storage
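torch.load accepts any callable of (storage, location) as map_location, so once this helper is bound to an xpu object it can be passed straight through. A hedged usage sketch (the xpu instance, the bound-method access, and the checkpoint path are hypothetical, not taken from netharn):

import torch

# `xpu._map_location` is assumed to be the helper above bound as a method, so
# it already closes over `xpu` and has the (storage, location) signature that
# torch.load expects.
snapshot = torch.load('deploy.pt', map_location=xpu._map_location)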
Example #4
Source File: util.py, from allennlp (Apache License 2.0)
def device_mapping(cuda_device: int):
    """
    In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
    you have to supply a `map_location` function. Call this with
    the desired `cuda_device` to get the function that `torch.load()` needs.
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:
        if cuda_device >= 0:
            return storage.cuda(cuda_device)
        else:
            return storage
    return inner_device_mapping
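In use, the returned closure is handed to torch.load, which calls it once per deserialized storage. A brief sketch (the checkpoint path is hypothetical):

import torch

# Load a GPU-trained checkpoint onto the CPU ...
cpu_state = torch.load('best.th', map_location=device_mapping(-1))

# ... or remap every storage onto GPU 0.
gpu_state = torch.load('best.th', map_location=device_mapping(0))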
Example #5
Source File: nn.py, from gtos (MIT License)
def device_mapping(cuda_device: int):
    """
    In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
    you have to supply a `map_location` function. Call this with
    the desired `cuda_device` to get the function that `torch.load()` needs.
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:  # pylint: disable=unused-argument
        if cuda_device >= 0:
            return storage.cuda(cuda_device)
        else:
            return storage
    return inner_device_mapping
Example #6
Source File: environment.py, from gtos (MIT License)
def device_mapping(cuda_device: int):
    """
    In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
    you have to supply a `map_location` function. Call this with
    the desired `cuda_device` to get the function that `torch.load()` needs.
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:  # pylint: disable=unused-argument
        if cuda_device >= 0:
            return storage.cuda(cuda_device)
        else:
            return storage
    return inner_device_mapping
Example #7
Source File: efficient_densecrnn.py, from efficientdensenet_crnn (MIT License)
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, storage_size=1024):
    input_storage_1 = torch.Storage(storage_size)
    input_storage_2 = torch.Storage(storage_size)
    self.final_num_features = num_input_features + (growth_rate * num_layers)
    self.shared_allocation_1 = _SharedAllocation(input_storage_1)
    self.shared_allocation_2 = _SharedAllocation(input_storage_2)
    super(_DenseBlock, self).__init__()
    for i in range(num_layers):
        layer = _DenseLayer(self.shared_allocation_1, self.shared_allocation_2,
                            num_input_features + i * growth_rate,
                            growth_rate, bn_size, drop_rate)
        self.add_module('denselayer%d' % (i + 1), layer)
Example #8
Source File: efficient_densecrnn.py, from efficientdensenet_crnn (MIT License)
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, storage_size=1024):
    input_storage_1 = torch.Storage(storage_size)
    input_storage_2 = torch.Storage(storage_size)
    self.final_num_features = num_input_features + (growth_rate * num_layers)
    self.shared_allocation_1 = _SharedAllocation(input_storage_1)
    self.shared_allocation_2 = _SharedAllocation(input_storage_2)
    super(_DenseBlock, self).__init__()
    for i in range(num_layers):
        layer = _DenseLayer(self.shared_allocation_1, self.shared_allocation_2,
                            num_input_features + i * growth_rate,
                            growth_rate, bn_size, drop_rate)
        self.add_module('denselayer%d' % (i + 1), layer)
Example #9
Source File: dense_efficient.py, from backdoor_federated_learning (MIT License)
def __init__(self, size):
    self._cpu_storage = torch.Storage(size)
    self._gpu_storages = []
    if torch.cuda.is_available():
        for device_idx in range(torch.cuda.device_count()):
            with torch.cuda.device(device_idx):
                self._gpu_storages.append(torch.Storage(size).cuda())
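With one storage kept per CUDA device, the rest of such a shared-allocation class typically just grows the buffers on demand and hands back the one matching a given tensor's device. A hedged sketch of that lookup (not verbatim dense_efficient.py code; method names are assumed):

def storage_for(self, tensor):
    # Return the shared buffer that lives on the same device as `tensor`.
    if tensor.is_cuda:
        return self._gpu_storages[tensor.get_device()]
    return self._cpu_storage

def resize_(self, size):
    # Grow every per-device buffer so any of them can hold `size` elements.
    if self._cpu_storage.size() < size:
        self._cpu_storage.resize_(size)
    for storage in self._gpu_storages:
        if storage.size() < size:
            storage.resize_(size)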
Example #10
Source File: nn.py, from stog (MIT License)
def device_mapping(cuda_device: int):
    """
    In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
    you have to supply a `map_location` function. Call this with
    the desired `cuda_device` to get the function that `torch.load()` needs.
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:  # pylint: disable=unused-argument
        if cuda_device >= 0:
            return storage.cuda(cuda_device)
        else:
            return storage
    return inner_device_mapping
Example #11
Source File: environment.py, from stog (MIT License)
def device_mapping(cuda_device: int):
    """
    In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
    you have to supply a `map_location` function. Call this with
    the desired `cuda_device` to get the function that `torch.load()` needs.
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:  # pylint: disable=unused-argument
        if cuda_device >= 0:
            return storage.cuda(cuda_device)
        else:
            return storage
    return inner_device_mapping
Example #12
Source File: cu_net_prev_version.py, from CU-Net (Apache License 2.0)
def __init__(self, in_num, neck_size, growth_rate, layer_num, max_link,
             storage_size=1024, requires_skip=True, is_up=False):
    input_storage_1 = torch.Storage(storage_size)
    input_storage_2 = torch.Storage(storage_size)
    self.shared_allocation_1 = _SharedAllocation(input_storage_1)
    self.shared_allocation_2 = _SharedAllocation(input_storage_2)
    self.saved_features = []
    self.max_link = max_link
    self.requires_skip = requires_skip
    super(_DenseBlock, self).__init__()
    max_in_num = in_num + max_link * growth_rate
    self.final_num_features = max_in_num
    self.layers = []
    print('layer number is %d' % layer_num)
    for i in range(0, layer_num):
        if i < max_link:
            tmp_in_num = in_num + i * growth_rate
        else:
            tmp_in_num = max_in_num
        print('layer %d input channel number is %d' % (i, tmp_in_num))
        self.layers.append(_DenseLayer(self.shared_allocation_1, self.shared_allocation_2,
                                       tmp_in_num, neck_size, growth_rate))
    self.layers = nn.ModuleList(self.layers)
    self.adapters_ahead = []
    adapter_in_nums = []
    adapter_out_num = in_num
    if is_up:
        adapter_out_num = adapter_out_num // 2  # integer division keeps the channel count an int under Python 3
    for i in range(0, layer_num):
        if i < max_link:
            tmp_in_num = in_num + (i+1) * growth_rate
        else:
            tmp_in_num = max_in_num + growth_rate
        adapter_in_nums.append(tmp_in_num)
        print('adapter %d input channel number is %d' % (i, adapter_in_nums[i]))
        self.adapters_ahead.append(_EfficientDensenetBottleneck(self.shared_allocation_1,
                                                                self.shared_allocation_2,
                                                                adapter_in_nums[i], adapter_out_num))
    self.adapters_ahead = nn.ModuleList(self.adapters_ahead)
    print('adapter output channel number is %d' % adapter_out_num)
    if requires_skip:
        print('creating skip layers ...')
        self.adapters_skip = []
        for i in range(0, layer_num):
            self.adapters_skip.append(_EfficientDensenetBottleneck(self.shared_allocation_1,
                                                                   self.shared_allocation_2,
                                                                   adapter_in_nums[i], adapter_out_num))
        self.adapters_skip = nn.ModuleList(self.adapters_skip)
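To make the max_link bookkeeping concrete, here is a small worked sketch (the numbers are illustrative, not CU-Net defaults): each layer's input width grows by growth_rate until it saturates at max_in_num, after which every later layer sees the same width.

in_num, growth_rate, max_link = 128, 32, 4
max_in_num = in_num + max_link * growth_rate               # 256

for i in range(6):
    tmp_in_num = in_num + i * growth_rate if i < max_link else max_in_num
    print(i, tmp_in_num)                                   # 128, 160, 192, 224, 256, 256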
Example #13
Source File: cu_net_prev_version_wig.py, from CU-Net (Apache License 2.0)
def __init__(self, in_num, neck_size, growth_rate, layer_num, max_link,
             storage_size=1024, requires_skip=True, is_up=False):
    input_storage_1 = torch.Storage(storage_size)
    input_storage_2 = torch.Storage(storage_size)
    self.shared_allocation_1 = _SharedAllocation(input_storage_1)
    self.shared_allocation_2 = _SharedAllocation(input_storage_2)
    self.saved_features = []
    self.max_link = max_link
    self.requires_skip = requires_skip
    super(_DenseBlock, self).__init__()
    max_in_num = in_num + max_link * growth_rate
    self.final_num_features = max_in_num
    self.layers = []
    print('layer number is %d' % layer_num)
    for i in range(0, layer_num):
        if i < max_link:
            tmp_in_num = in_num + i * growth_rate
        else:
            tmp_in_num = max_in_num
        print('layer %d input channel number is %d' % (i, tmp_in_num))
        self.layers.append(_DenseLayer(self.shared_allocation_1, self.shared_allocation_2,
                                       tmp_in_num, neck_size, growth_rate))
    self.layers = nn.ModuleList(self.layers)
    self.adapters_ahead = []
    adapter_in_nums = []
    adapter_out_num = in_num
    if is_up:
        adapter_out_num = adapter_out_num // 2  # integer division keeps the channel count an int under Python 3
    for i in range(0, layer_num):
        if i < max_link:
            tmp_in_num = in_num + (i+1) * growth_rate
        else:
            tmp_in_num = max_in_num + growth_rate
        adapter_in_nums.append(tmp_in_num)
        print('adapter %d input channel number is %d' % (i, adapter_in_nums[i]))
        self.adapters_ahead.append(_EfficientDensenetBottleneck(self.shared_allocation_1,
                                                                self.shared_allocation_2,
                                                                adapter_in_nums[i], adapter_out_num))
    self.adapters_ahead = nn.ModuleList(self.adapters_ahead)
    print('adapter output channel number is %d' % adapter_out_num)
    if requires_skip:
        print('creating skip layers ...')
        self.adapters_skip = []
        for i in range(0, layer_num):
            self.adapters_skip.append(_EfficientDensenetBottleneck(self.shared_allocation_1,
                                                                   self.shared_allocation_2,
                                                                   adapter_in_nums[i], adapter_out_num))
        self.adapters_skip = nn.ModuleList(self.adapters_skip)