Python caffe.TRAIN Examples

The following are 27 code examples of caffe.TRAIN, the constant that selects a network's training phase (caffe.TEST is its test-phase counterpart). It is most often passed to the caffe.Net constructor or attached to a layer's include=dict(phase=...) rule. The project and source file for each snippet are listed above it. You may also want to check out the other functions and classes of the caffe module. The sketch below summarizes the two usage patterns the examples revolve around.
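Before the examples, here is a minimal sketch of those two patterns: passing caffe.TRAIN to the caffe.Net constructor, and attaching it to a layer's include rule when composing a network. The file names train_val.prototxt, weights.caffemodel and train.txt are placeholders for this sketch, not files from any of the projects below.

import caffe
from caffe import layers as L

# Load a network definition (and optional pretrained weights) in the training phase.
net = caffe.Net('train_val.prototxt', 'weights.caffemodel', caffe.TRAIN)

# Restrict a data layer to the training phase when building a network with NetSpec.
ns = caffe.NetSpec()
ns.data, ns.label = L.ImageData(source='train.txt', batch_size=32, ntop=2,
                                include=dict(phase=caffe.TRAIN))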
Example #1
Source File: CaffeUNet_3D.py    From peters-stuff with GNU General Public License v3.0
def add_batchnormscale(self, input, name):

        if True: # necessary?
            batch_norm_param = {'moving_average_fraction': 0.95, 'use_global_stats': True}
            param = [dict(lr_mult=0), dict(lr_mult=0), dict(lr_mult=0)]
            l = L.BatchNorm(input, name=name + '_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TEST}, ntop=1)
            setattr(self.net_spec, name + '_bn', l)

            batch_norm_param = {'moving_average_fraction': 0.95, 'use_global_stats': False}
            l = L.BatchNorm(input, name=name + '_bn', top=name + '_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TRAIN}, ntop=0)
            setattr(self.net_spec, name + '_bn' + '_train', l)

            l = L.Scale(getattr(self.net_spec, name + '_bn'), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)
        else: # here without split in use_global_stats True/False
            l = L.Scale(L.BatchNorm(input), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)

        return l 
Example #2
Source File: CaffeUNet_2D.py    From peters-stuff with GNU General Public License v3.0
def add_batchnormscale(self, input, name):

        if True: # necessary?
            batch_norm_param = {'moving_average_fraction': 0.95, 'use_global_stats': True}
            param = [dict(lr_mult=0), dict(lr_mult=0), dict(lr_mult=0)]
            l = L.BatchNorm(input, name=name + '_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TEST}, ntop=1)
            setattr(self.net_spec, name + '_bn', l)

            batch_norm_param = {'moving_average_fraction': 0.95, 'use_global_stats': False}
            l = L.BatchNorm(input, name=name + '_bn', top=name + '_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TRAIN}, ntop=0)
            setattr(self.net_spec, name + '_bn' + '_train', l)

            l = L.Scale(getattr(self.net_spec, name + '_bn'), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)
        else: # here without split in use_global_stats True/False
            l = L.Scale(L.BatchNorm(input), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)

        return l 
Example #3
Source File: test_net.py    From Deep-Learning-Based-Structural-Damage-Detection with MIT License
def test_save_and_read(self):
        f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
        f.close()
        self.net.save(f.name)
        net_file = simple_net_file(self.num_output)
        # Test legacy constructor
        #   should print deprecation warning
        caffe.Net(net_file, f.name, caffe.TRAIN)
        # Test named constructor
        net2 = caffe.Net(net_file, caffe.TRAIN, weights=f.name)
        os.remove(net_file)
        os.remove(f.name)
        for name in self.net.params:
            for i in range(len(self.net.params[name])):
                self.assertEqual(abs(self.net.params[name][i].data
                    - net2.params[name][i].data).sum(), 0) 
Example #4
Source File: inference.py    From nideep with BSD 2-Clause "Simplified" License
def response_to_lmdb(fpath_net,
                     fpath_weights,
                     keys,
                     dst_prefix,
                     modes=None,
                     ):
    """
    keys -- name of responses to extract. Must be valid for all requested modes
    """
    modes = modes or [caffe.TRAIN, caffe.TEST]
    out = dict.fromkeys(modes)

    for m in modes:
        num_passes = est_min_num_fwd_passes(fpath_net, ['train', 'test'][m])
        out[m] = infer_to_lmdb(caffe.Net(fpath_net, fpath_weights, m),
                               keys,
                               num_passes,
                               dst_prefix + '%s_' + ['train', 'test'][m] + '_lmdb')
    return out 
Example #5
Source File: data.py    From pynetbuilder with BSD 2-Clause "Simplified" License
def __init__(self, params):
        if params['include'] == 'test':
            params['include'] = dict(phase=caffe.TEST)
        elif params['include'] == 'train':
            params['include'] = dict(phase=caffe.TRAIN)
        params['image_data_param'] = dict(source=params['source'] ,
                                          batch_size=params['batch_size'])
        if 'mean_file' in params:
            params['transform_param'] = dict(mean_file=params['mean_file'])
        self._required = ['name', 'source', 'batch_size', 'include']
        super(ImageDataLego, self).__init__(params) 
Example #6
Source File: unet_segmentation_no_db_example.py    From peters-stuff with GNU General Public License v3.0
def print_network_sizes(model_file):
    net = caffe.Net(model_file, caffe.TRAIN)
    for k, v in net.blobs.items():
        print(k, v.data.shape) 
Example #7
Source File: test_views.py    From Fabrik with GNU General Public License v3.0
def test_caffe_import(self):
        # Test 1
        data, label = L.ImageData(source='/dummy/source/', batch_size=32, ntop=2, rand_skip=0,
                                  shuffle=False, new_height=256, new_width=256, is_color=False,
                                  root_folder='/dummy/folder/',
                                  transform_param=dict(crop_size=227, mean_value=[104, 117, 123],
                                                       mirror=True, force_color=False,
                                                       force_gray=False))
        with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
            f.write(str(to_proto(data, label)))
        sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
        response = self.client.post(reverse('caffe-import'), {'file': sample_file})
        response = json.loads(response.content)
        os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
        self.assertGreaterEqual(len(response['net']['l0']['params']), 13)
        self.assertEqual(response['result'], 'success')
        # Test 2
        data, label = L.ImageData(source='/dummy/source/', batch_size=32, ntop=2, rand_skip=0,
                                  shuffle=False, new_height=256, new_width=256, is_color=False,
                                  root_folder='/dummy/folder/', include=dict(phase=caffe.TRAIN),
                                  transform_param=dict(crop_size=227, mean_file='/path/to/file',
                                                       mirror=True, force_color=False,
                                                       force_gray=False))
        with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
            f.write(str(to_proto(data, label)))
        sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
        response = self.client.post(reverse('caffe-import'), {'file': sample_file})
        response = json.loads(response.content)
        os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
        self.assertGreaterEqual(len(response['net']['l0']['params']), 13)
        self.assertEqual(response['result'], 'success') 
Example #8
Source File: caffe_mat_transform.py    From automatic-portrait-tf with GNU General Public License v3.0
def main():
    net = caffe.Net(MODEL_DEF, MODEL_WEIGHT, caffe.TRAIN)

    mat = []
    for i in range(len(net.layers)):
        mat_type = net.layers[i].type
        mat_data = []
        for j in range(len(net.layers[i].blobs)):
            mat_data.append(net.layers[i].blobs[j].data)
        mat.append((mat_type, mat_data))

    dt = np.dtype([('type', np.str_, 16), ('data', np.ndarray)])
    results = np.array(mat, dtype=dt)
    results.dump(MAT_RESULT) 
Example #9
Source File: draw_net.py    From Deep-Learning-Based-Structural-Damage-Detection with MIT License
def main():
    args = parse_args()
    net = caffe_pb2.NetParameter()
    text_format.Merge(open(args.input_net_proto_file).read(), net)
    print('Drawing net to %s' % args.output_image_file)
    phase = None
    if args.phase == "TRAIN":
        phase = caffe.TRAIN
    elif args.phase == "TEST":
        phase = caffe.TEST
    elif args.phase != "ALL":
        raise ValueError("Unknown phase: " + args.phase)
    caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir,
                                phase) 
Example #10
Source File: draw_net.py    From Deep-Learning-Based-Structural-Damage-Detection with MIT License
def parse_args():
    """Parse input arguments
    """

    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)

    parser.add_argument('input_net_proto_file',
                        help='Input network prototxt file')
    parser.add_argument('output_image_file',
                        help='Output image file')
    parser.add_argument('--rankdir',
                        help=('One of TB (top-bottom, i.e., vertical), '
                              'RL (right-left, i.e., horizontal), or another '
                              'valid dot option; see '
                              'http://www.graphviz.org/doc/info/'
                              'attrs.html#k:rankdir'),
                        default='LR')
    parser.add_argument('--phase',
                        help=('Which network phase to draw: can be TRAIN, '
                              'TEST, or ALL.  If ALL, then all layers are drawn '
                              'regardless of phase.'),
                        default="ALL")

    args = parser.parse_args()
    return args 
Example #11
Source File: test_python_layer.py    From Deep-Learning-Based-Structural-Damage-Detection with MIT License
def test_phase(self):
        net_file = phase_net_file()
        for phase in caffe.TRAIN, caffe.TEST:
            net = caffe.Net(net_file, phase)
            self.assertEqual(net.forward()['phase'], phase) 
Example #12
Source File: test_python_layer.py    From Deep-Learning-Based-Structural-Damage-Detection with MIT License
def test_parameter(self):
        net_file = parameter_net_file()
        net = caffe.Net(net_file, caffe.TRAIN)
        # Test forward and backward
        net.forward()
        net.backward()
        layer = net.layers[list(net._layer_names).index('layer')]
        self.assertEqual(layer.blobs[0].data[0], 0)
        self.assertEqual(layer.blobs[0].diff[0], 1)
        layer.blobs[0].data[0] += layer.blobs[0].diff[0]
        self.assertEqual(layer.blobs[0].data[0], 1)

        # Test saving and loading
        h, caffemodel_file = tempfile.mkstemp()
        net.save(caffemodel_file)
        layer.blobs[0].data[0] = -1
        self.assertEqual(layer.blobs[0].data[0], -1)
        net.copy_from(caffemodel_file)
        self.assertEqual(layer.blobs[0].data[0], 1)
        os.remove(caffemodel_file)
        
        # Test weight sharing
        net2 = caffe.Net(net_file, caffe.TRAIN)
        net2.share_with(net)
        layer = net2.layers[list(net2._layer_names).index('layer')]
        self.assertEqual(layer.blobs[0].data[0], 1)

        os.remove(net_file) 
Example #13
Source File: test_net.py    From Deep-Learning-Based-Structural-Damage-Detection with MIT License
def test_train(self):
        net = caffe.Net(self.f.name, caffe.TRAIN, stages=['train'])
        self.check_net(net, ['loss']) 
Example #14
Source File: test_net.py    From Deep-Learning-Based-Structural-Damage-Detection with MIT License
def test_save_hdf5(self):
        f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
        f.close()
        self.net.save_hdf5(f.name)
        net_file = simple_net_file(self.num_output)
        net2 = caffe.Net(net_file, caffe.TRAIN)
        net2.load_hdf5(f.name)
        os.remove(net_file)
        os.remove(f.name)
        for name in self.net.params:
            for i in range(len(self.net.params[name])):
                self.assertEqual(abs(self.net.params[name][i].data
                    - net2.params[name][i].data).sum(), 0) 
Example #15
Source File: test_net.py    From Deep-Learning-Based-Structural-Damage-Detection with MIT License
def setUp(self):
        self.num_output = 13
        net_file = simple_net_file(self.num_output)
        self.net = caffe.Net(net_file, caffe.TRAIN)
        # fill in valid labels
        self.net.blobs['label'].data[...] = \
                np.random.randint(self.num_output,
                    size=self.net.blobs['label'].data.shape)
        os.remove(net_file) 
Example #16
Source File: test_python_layer_with_param_str.py    From Deep-Learning-Based-Structural-Damage-Detection with MIT License
def setUp(self):
        net_file = python_param_net_file()
        self.net = caffe.Net(net_file, caffe.TRAIN)
        os.remove(net_file) 
Example #17
Source File: test_inference.py    From nideep with BSD 2-Clause "Simplified" License
def test_response_to_lmdb(self, mock_net, mock_num):

        # fake minimal test data
        b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}

        # mock methods and properties of Net objects
        mock_num.return_value = 3
        mock_net.return_value.forward.return_value = np.zeros(1)
        type(mock_net.return_value).blobs = PropertyMock(return_value=b)
        net = mock_net()

        dst_prefix = os.path.join(self.dir_tmp, 'test_response_to_lmdb_')
        for m in ['train', 'test']:
            for k in b.keys():
                assert_false(os.path.isdir(dst_prefix + ('%s_' + m + '_lmdb') % k))
        import nideep
        out = nideep.eval.inference.response_to_lmdb("net.prototxt",
                                                     "w.caffemodel",
                                                     ['x', 'z'],
                                                     dst_prefix)

        assert_equal(net.forward.call_count, 3 * 2)  # double for both modes
        from caffe import TRAIN, TEST
        assert_list_equal(out.keys(), [TRAIN, TEST])
        assert_list_equal(out[TRAIN], [3 * 4] * 2)
        assert_list_equal(out[TEST], [3 * 4] * 2)

        for m in ['train', 'test']:
            for k in b.keys():
                if k in ['x', 'z']:
                    assert_true(os.path.isdir(dst_prefix + ('%s_' + m + '_lmdb') % k))
                else:
                    assert_false(os.path.isdir(dst_prefix + ('%s_' + m + '_lmdb') % k)) 
Example #18
Source File: inference.py    From nideep with BSD 2-Clause "Simplified" License
def est_min_num_fwd_passes(fpath_net, mode_str, key=None):
    """
    if multiple source for same mode, base num_passes on last
    fpath_net -- path to network definition
    mode_str -- train or test?

    return
    minimum no. of forward passes to cover training set
    """
    from nideep.proto.proto_utils import Parser
    mode_num = {'train' : caffe.TRAIN,
                'test' : caffe.TEST}[mode_str]
    np = Parser().from_net_params_file(fpath_net)
    num_passes_each = []
    for l in np.layer:
        if 'data' in l.type.lower():
            if ('hdf5data' in l.type.lower() and
                    (mode_str.lower() in l.hdf5_data_param.source.lower() or
                        [x.phase for x in l.include] == [mode_num])):
                num_entries = CreateDatasource.from_path(l.hdf5_data_param.source, key=key).num_entries()
                num_passes = int(num_entries / l.hdf5_data_param.batch_size)
                if num_entries % l.hdf5_data_param.batch_size != 0:
                    logger.warning("db size not a multiple of batch size. Adding another fwd. pass.")
                    num_passes += 1
                logger.info("%d fwd. passes with batch size %d" % (num_passes, l.hdf5_data_param.batch_size))
                num_passes_each.append(num_passes)
            elif (mode_str.lower() in l.data_param.source.lower() or
                    [x.phase for x in l.include] == [mode_num]):
                num_entries = CreateDatasource.from_path(l.data_param.source, key=key).num_entries()
                num_passes = int(num_entries / l.data_param.batch_size)
                if num_entries % l.data_param.batch_size != 0:
                    logger.warning("db size not a multiple of batch size. Adding another fwd. pass.")
                    num_passes += 1
                logger.info("%d fwd. passes with batch size %d" % (num_passes, l.data_param.batch_size))
                num_passes_each.append(num_passes)
    return max(num_passes_each) 
Example #19
Source File: test_python_layer.py    From mix-and-match with MIT License
def setUp(self):
        net_file = python_net_file()
        self.net = caffe.Net(net_file, caffe.TRAIN)
        os.remove(net_file) 
Example #20
Source File: test_net.py    From mix-and-match with MIT License
def test_save_and_read(self):
        f = tempfile.NamedTemporaryFile(delete=False)
        f.close()
        self.net.save(f.name)
        net_file = simple_net_file(self.num_output)
        net2 = caffe.Net(net_file, f.name, caffe.TRAIN)
        os.remove(net_file)
        os.remove(f.name)
        for name in self.net.params:
            for i in range(len(self.net.params[name])):
                self.assertEqual(abs(self.net.params[name][i].data
                    - net2.params[name][i].data).sum(), 0) 
Example #21
Source File: test_net.py    From mix-and-match with MIT License
def setUp(self):
        self.num_output = 13
        net_file = simple_net_file(self.num_output)
        self.net = caffe.Net(net_file, caffe.TRAIN)
        # fill in valid labels
        self.net.blobs['label'].data[...] = \
                np.random.randint(self.num_output,
                    size=self.net.blobs['label'].data.shape)
        os.remove(net_file) 
Example #22
Source File: draw_caffe_net.py    From EverybodyDanceNow_reproduce_pytorch with MIT License
def main():
    args = parse_args()
    net = caffe_pb2.NetParameter()
    text_format.Merge(open(args.input_net_proto_file).read(), net)
    print('Drawing net to %s' % args.output_image_file)
    phase = None
    if args.phase == "TRAIN":
        phase = caffe.TRAIN
    elif args.phase == "TEST":
        phase = caffe.TEST
    elif args.phase != "ALL":
        raise ValueError("Unknown phase: " + args.phase)
    caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir,
                                phase) 
Example #23
Source File: draw_caffe_net.py    From EverybodyDanceNow_reproduce_pytorch with MIT License
def parse_args():
    """Parse input arguments
    """

    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)

    parser.add_argument('--input_net_proto_file',
                        help='Input network prototxt file')
    parser.add_argument('--output_image_file',
                        help='Output image file')
    parser.add_argument('--rankdir',
                        help=('One of TB (top-bottom, i.e., vertical), '
                              'RL (right-left, i.e., horizontal), or another '
                              'valid dot option; see '
                              'http://www.graphviz.org/doc/info/'
                              'attrs.html#k:rankdir'),
                        default='LR')
    parser.add_argument('--phase',
                        help=('Which network phase to draw: can be TRAIN, '
                              'TEST, or ALL.  If ALL, then all layers are drawn '
                              'regardless of phase.'),
                        default="ALL")

    args = parser.parse_args()
    return args 
Example #24
Source File: CaffeUNet_2D.py    From peters-stuff with GNU General Public License v3.0
def print_network_sizes(self, model_file):

        net = caffe.Net(model_file, caffe.TRAIN)
        for k, v in net.blobs.items():
            print(k, v.data.shape) 
Example #25
Source File: CaffeUNet_3D.py    From peters-stuff with GNU General Public License v3.0
def print_network_sizes(self, model_file):

        net = caffe.Net(model_file, caffe.TRAIN)
        for k, v in net.blobs.items():
            print(k, v.data.shape) 
Example #26
Source File: caffe_emitter.py    From MMdnn with MIT License
def end_code(self):
        return """    return n

def make_net(prototxt):
    n = KitModel()
    with open(prototxt, 'w') as fpb:
        print(n.to_proto(), file=fpb)

def gen_weight(weight_file, model, prototxt):
    global __weights_dict
    __weights_dict = load_weights(weight_file)

    net = caffe.Net(prototxt, caffe.TRAIN)

    for key in __weights_dict:
        if 'weights' in __weights_dict[key]:
            net.params[key][0].data.flat = __weights_dict[key]['weights']
        elif 'mean' in __weights_dict[key]:
            net.params[key][0].data.flat = __weights_dict[key]['mean']
            net.params[key][1].data.flat = __weights_dict[key]['var']
            if 'scale' in __weights_dict[key]:
                net.params[key][2].data.flat = __weights_dict[key]['scale']
        elif 'scale' in __weights_dict[key]:
            net.params[key][0].data.flat = __weights_dict[key]['scale']
        if 'bias' in __weights_dict[key]:
            net.params[key][1].data.flat = __weights_dict[key]['bias']
        if 'gamma' in __weights_dict[key]: # used for prelu, not sure if other layers use this too
            net.params[key][0].data.flat = __weights_dict[key]['gamma']
    net.save(model)
    return net



if __name__=='__main__':
    parser = argparse.ArgumentParser(description='Generate caffe model and prototxt')
    parser.add_argument('--weight_file', '-w', type=_text_type, default='IR weight file')
    parser.add_argument('--prototxt', '-p', type=_text_type, default='caffe_converted.prototxt')
    parser.add_argument('--model', '-m', type=_text_type, default='caffe_converted.caffemodel')
    args = parser.parse_args()
    # For some reason argparse gives us unicode, so we need to convert to str first
    make_net(str(args.prototxt))
    gen_weight(str(args.weight_file), str(args.model), str(args.prototxt))

""" 
Example #27
Source File: predictor_caffe.py    From visual_dynamics with MIT License
def train(self, train_hdf5_fname, val_hdf5_fname=None, solverstate_fname=None, solver_param=None, batch_size=32, visualize_response_maps=False):
        hdf5_txt_fnames = []
        for hdf5_fname in [train_hdf5_fname, val_hdf5_fname]:
            if hdf5_fname is not None:
                head, tail = os.path.split(hdf5_fname)
                root, _ = os.path.splitext(tail)
                hdf5_txt_fname = os.path.join(head, '.' + root + '.txt')
                if not os.path.isfile(hdf5_txt_fname):
                    with open(hdf5_txt_fname, 'w') as f:
                        f.write(hdf5_fname + '\n')
                hdf5_txt_fnames.append(hdf5_txt_fname)
            else:
                hdf5_txt_fnames.append(None)
        train_hdf5_txt_fname, val_hdf5_txt_fname = hdf5_txt_fnames

        input_shapes = (self.x_shape, self.u_shape)
        train_net_param, weight_fillers = self.net_func(input_shapes, train_hdf5_txt_fname, batch_size, self.net_name, phase=caffe.TRAIN)
        if val_hdf5_fname is not None:
            val_net_param, _ = self.net_func(input_shapes, val_hdf5_txt_fname, batch_size, self.net_name, phase=caffe.TEST)

        self.train_val_net_param = train_net_param
        if val_hdf5_fname is not None:
            layers = [layer for layer in self.train_val_net_param.layer]
            # remove layers except for data layers
            for layer in layers:
                if 'Data' not in layer.type:
                    self.train_val_net_param.layer.remove(layer)
            # add data layers from validation net_caffe
            self.train_val_net_param.layer.extend([layer for layer in val_net_param.layer if 'Data' in layer.type])
            # add back the layers that are not data layers
            self.train_val_net_param.layer.extend([layer for layer in layers if 'Data' not in layer.type])
        self.train_val_net_param = net_caffe.train_val_net(self.train_val_net_param)
        train_val_fname = self.get_model_fname('train_val')
        with open(train_val_fname, 'w') as f:
            f.write(str(self.train_val_net_param))

        if solver_param is None:
            solver_param = pb2.SolverParameter()
        self.add_default_parameters(solver_param, val_net=val_hdf5_fname is not None)

        solver_fname = self.get_model_fname('solver')
        with open(solver_fname, 'w') as f:
            f.write(str(solver_param))

        solver = caffe.get_solver(solver_fname)
        self.set_weight_fillers(solver.net.params, weight_fillers)
        for param_name, param in self.params.items():
            for blob, solver_blob in zip(param, solver.net.params[param_name]):
                solver_blob.data[...] = blob.data
        if solverstate_fname is not None:
            if not solverstate_fname.endswith('.solverstate'):
                solverstate_fname = self.get_snapshot_prefix() + '_iter_' + solverstate_fname + '.solverstate'
            solver.restore(solverstate_fname)
        self.solve(solver, solver_param, visualize_response_maps=visualize_response_maps)
        for param_name, param in self.params.items():
            for blob, solver_blob in zip(param, solver.net.params[param_name]):
                blob.data[...] = solver_blob.data

        self.train_net = solver.net
        if val_hdf5_fname is not None:
            self.val_net = solver.test_nets[0]