Python numpy.hsplit() Examples

The following are 30 code examples of numpy.hsplit(), collected from open-source projects. The source file, project, and license are listed above each example.
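
Before the project examples, here is a minimal, self-contained sketch of what numpy.hsplit() does. The array contents and split points are chosen purely for illustration.

import numpy as np

a = np.arange(12).reshape(3, 4)

# Split into two equal halves along the columns (axis 1).
left, right = np.hsplit(a, 2)
print(left.shape, right.shape)                 # (3, 2) (3, 2)

# Split at explicit column indices instead of into equal sections.
first, middle, last = np.hsplit(a, [1, 3])
print(first.shape, middle.shape, last.shape)   # (3, 1) (3, 2) (3, 1)
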
Example #1
Source File: main.py    From Systematic-LEDs with MIT License
def visualize_wave(self, y):
        """Effect that flashes to the beat with scrolling coloured bits"""
        if self.current_freq_detects["beat"]:
            output = np.zeros((3,config.settings["devices"][self.board]["configuration"]["N_PIXELS"]))
            output[0][:]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_flash"])[0]
            output[1][:]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_flash"])[1]
            output[2][:]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_flash"])[2]
            self.wave_wipe_count = config.settings["devices"][self.board]["effect_opts"]["Wave"]["wipe_len"]
        else:
            output = np.copy(self.prev_output)
            #for i in range(len(self.prev_output)):
            #    output[i] = np.hsplit(self.prev_output[i],2)[0]
            output = np.multiply(self.prev_output,config.settings["devices"][self.board]["effect_opts"]["Wave"]["decay"])
            for i in range(self.wave_wipe_count):
                output[0][i]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_wave"])[0]
                output[0][-i]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_wave"])[0]
                output[1][i]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_wave"])[1]
                output[1][-i]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_wave"])[1]
                output[2][i]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_wave"])[2]
                output[2][-i]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_wave"])[2]
            #output = np.concatenate([output,np.fliplr(output)], axis=1)
            if self.wave_wipe_count > config.settings["devices"][self.board]["configuration"]["N_PIXELS"]//2:
                self.wave_wipe_count = config.settings["devices"][self.board]["configuration"]["N_PIXELS"]//2
            self.wave_wipe_count += config.settings["devices"][self.board]["effect_opts"]["Wave"]["wipe_speed"]
        return output 
Example #2
Source File: initData.py    From Python_DIC with Apache License 2.0
def openCoordinates(directory, nbInstances, nbImages):

    zi = []
    zi_strainX = []
    zi_strainY = []
    testTime = time.time()
    coordinatesFile = getData.testReadFile(directory+'/coordinates.csv')
    if coordinatesFile is not None:
        instanceCoordinates = np.hsplit(coordinatesFile, nbInstances)
        for instance in range(nbInstances):
            try:
                imageCoordinates = np.asarray(np.vsplit(instanceCoordinates[instance], nbImages))
            except:
                return None, None, None
            zi.append(imageCoordinates[:,:,0:100])
            zi_strainX.append(imageCoordinates[:,:,100:200])
            zi_strainY.append(imageCoordinates[:,:,200:300])
        return zi, zi_strainX, zi_strainY
    else:
        return None, None, None 
Example #3
Source File: rf_NDVIEvolution.py    From python-urbanPlanning with MIT License
def trainBlock(array,row,col):
    arrayShape=array.shape
    print(arrayShape)
    rowPara=divmod(arrayShape[1],row)  # divmod(a, b) returns the integer quotient and the remainder of a divided by b
    colPara=divmod(arrayShape[0],col)
    extractArray=array[:colPara[0]*col,:rowPara[0]*row]  # trim the surplus rows/columns so the array splits evenly
#    print(extractArray.shape)
    hsplitArray=np.hsplit(extractArray,rowPara[0])
    vsplitArray=flatten_lst([np.vsplit(subArray,colPara[0]) for subArray in hsplitArray])
    dataBlock=flatten_lst(vsplitArray)
    print("样本量:%s"%(len(dataBlock)))  #此时切分的块数据量,就为样本数据量
    
    '''Display one of the sample blocks for inspection'''
    subShow=dataBlock[-10]
    print(subShow,'\n',subShow.max(),subShow.std())
    fig=plt.figure(figsize=(20, 12))
    ax=fig.add_subplot(111)
    plt.xticks([x for x in range(subShow.shape[0]) if x%400==0])
    plt.yticks([y for y in range(subShow.shape[1]) if y%200==0])
    ax.imshow(subShow)    
    
    dataBlockStack=np.append(dataBlock[:-1],[dataBlock[-1]],axis=0)  # convert the list of blocks into an array
    print(dataBlockStack.shape)
    return dataBlockStack 
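
The hsplit/vsplit tiling above can be checked on a small stand-in array. This is only a sketch of the same idea, with invented sizes and the plotting and flatten_lst helper left out.

import numpy as np

array = np.arange(6 * 8).reshape(6, 8)           # stand-in for a raster band
row, col = 4, 3                                  # tile width (columns) and tile height (rows)
rowFit, colFit = array.shape[1] // row, array.shape[0] // col
extract = array[:colFit * col, :rowFit * row]    # trim so the splits are even
tiles = [tile
         for strip in np.hsplit(extract, rowFit)     # vertical strips, `row` columns wide
         for tile in np.vsplit(strip, colFit)]       # cut each strip into `col`-row tiles
print(len(tiles), tiles[0].shape)                # 4 (3, 4)
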
Example #4
Source File: Pooling.py    From EyerissF with GNU Lesser General Public License v2.1
def MAXPooling(Array,activation=1, ksize=2):
    assert len(Array) % ksize == 0

    V2list = np.vsplit(Array, len(Array) / ksize)

    VerticalElements = list()
    HorizontalElements = list()

    for x in V2list:
        H2list = np.hsplit(x, len(x[0]) / ksize)
        HorizontalElements.clear()
        for y in H2list:
            # y should be a ksize-by-ksize square (2x2 with the default ksize)
            HorizontalElements.append(y.max())
        VerticalElements.append(np.array(HorizontalElements))

    return np.array(np.array(VerticalElements)/activation,dtype=int) 
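
Assuming the MAXPooling function above is in scope, a hypothetical 4x4 input with the default ksize of 2 behaves like ordinary 2x2 max pooling (values invented):

import numpy as np

Array = np.array([[1, 2, 5, 6],
                  [3, 4, 7, 8],
                  [9, 1, 2, 3],
                  [4, 5, 6, 7]])
print(MAXPooling(Array))
# [[4 8]
#  [9 7]]
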
Example #5
Source File: von_mises_stress.py    From fenics-topopt with MIT License
def calculate_diff_stress(self, x, u, nu, side=1):
        """
        Calculate the derivative of the Von Mises stress given the densities x,
        displacements u, and Young's modulus nu. Optionally, provide the side
        length (default: 1).
        """
        rho = self.penalized_densities(x)
        EB = self.E(nu).dot(self.B(side))
        EBu = sum([EB.dot(u[:, i][self.edofMat]) for i in range(u.shape[1])])
        s11, s22, s12 = numpy.hsplit((EBu * rho / float(u.shape[1])).T, 3)
        drho = self.diff_penalized_densities(x)
        ds11, ds22, ds12 = numpy.hsplit(
            ((1 - rho) * drho * EBu / float(u.shape[1])).T, 3)
        vm_stress = numpy.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)
        if abs(vm_stress).sum() > 1e-8:
            dvm_stress = (0.5 * (1. / vm_stress) * (2 * s11 * ds11 -
                ds11 * s22 - s11 * ds22 + 2 * s22 * ds22 + 6 * s12 * ds12))
            return dvm_stress
        return 0 
Example #6
Source File: test_vecm.py    From vnpy_crypto with MIT License
def test_var_rep():
    if debug_mode:
        if "VAR repr. A" not in to_test:  # pragma: no cover
            return
        print("\n\nVAR REPRESENTATION", end="")
    for ds in datasets:
        for dt in ds.dt_s_list:
            if debug_mode:
                print("\n" + dt_s_tup_to_string(dt) + ": ", end="")

            exog = (results_sm_exog[ds][dt].exog is not None)
            exog_coint = (results_sm_exog_coint[ds][dt].exog_coint is not None)

            err_msg = build_err_msg(ds, dt, "VAR repr. A")
            obtained = results_sm[ds][dt].var_rep
            obtained_exog = results_sm_exog[ds][dt].var_rep
            obtained_exog_coint = results_sm_exog_coint[ds][dt].var_rep
            p = obtained.shape[0]
            desired = np.hsplit(results_ref[ds][dt]["est"]["VAR A"], p)
            assert_allclose(obtained, desired, rtol, atol, False, err_msg)
            if exog:
                assert_equal(obtained_exog, obtained, "WITH EXOG" + err_msg)
            if exog_coint:
                assert_equal(obtained_exog_coint, obtained, "WITH EXOG_COINT" + err_msg) 
Example #7
Source File: von_mises_stress.py    From fenics-topopt with MIT License
def calculate_diff_stress(self, x, u, nu, side=1):
        """
        Calculate the derivative of the Von Mises stress given the densities x,
        displacements u, and Young's modulus nu. Optionally, provide the side
        length (default: 1).
        """
        rho = self.penalized_densities(x)
        EB = self.E(nu).dot(self.B(side))
        EBu = sum([EB.dot(u[:, i][self.edofMat]) for i in range(u.shape[1])])
        s11, s22, s12 = numpy.hsplit((EBu * rho / float(u.shape[1])).T, 3)
        drho = self.diff_penalized_densities(x)
        ds11, ds22, ds12 = numpy.hsplit(
            ((1 - rho) * drho * EBu / float(u.shape[1])).T, 3)
        vm_stress = numpy.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)
        if abs(vm_stress).sum() > 1e-8:
            dvm_stress = (0.5 * (1. / vm_stress) * (2 * s11 * ds11 -
                ds11 * s22 - s11 * ds22 + 2 * s22 * ds22 + 6 * s12 * ds12))
            return dvm_stress
        return 0 
Example #8
Source File: cluster_corr.py    From altanalyze with Apache License 2.0
def find_closest_cluster(query, ref, min_correlation=-1):
    """
    For each collection in query, identifies the collection in ref that is most similar

    query and ref are both dictionaries of CellCollections, keyed by a "partition id"

    Returns a list containing the best matches for each collection in query that meet the 
    min_correlation threshold.  Each member of the list is itself a list containing the 
    id of the query collection and the id of its best match in ref
    """
    query_centroids, query_ids = compute_centroids(query)
    ref_centroids, ref_ids = compute_centroids(ref)
    print('number of reference partitions %d, number of query partitions %d' % (len(ref_ids),len(query_ids)))
    all_correlations = np.corrcoef(np.concatenate((ref_centroids, query_centroids), axis=1), rowvar=False)

    # At this point, we have the correlations of everything vs everything.  We only care about query vs ref
    # Extract the top-right corner of the matrix
    nref = len(ref)
    corr = np.hsplit(np.vsplit(all_correlations, (nref, ))[0], (nref,))[1]
    best_match = zip(range(corr.shape[1]), np.argmax(corr, 0))
    # At this point, best_match is: 1) using indices into the array rather than ids, 
    # and 2) not restricted by the threshold.  Fix before returning
    return ( (query_ids[q], ref_ids[r]) for q, r in best_match if corr[r,q] >= min_correlation ) 
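
The nested vsplit/hsplit call above simply carves the top-right nref x nquery corner out of the full correlation matrix. A standalone sketch of just that step, with made-up sizes:

import numpy as np

nref, nquery = 3, 2
full = np.arange((nref + nquery) ** 2).reshape(nref + nquery, nref + nquery)
top = np.vsplit(full, (nref,))[0]          # first nref rows
top_right = np.hsplit(top, (nref,))[1]     # columns nref onwards
print(top_right.shape)                     # (3, 2): ref rows vs query columns
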
Example #9
Source File: svm_handwritten_digits_recognition_preprocessing_hog.py    From Mastering-OpenCV-4-with-Python with MIT License
def load_digits_and_labels(big_image):
    """ Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels 
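
The row/column split used by load_digits_and_labels can be reproduced on a tiny synthetic "big image"; the sizes below are invented, and cv2 is not needed for the split itself.

import numpy as np

SIZE_IMAGE = 2
digits_img = np.arange(4 * 6).reshape(4, 6)          # stand-in for the loaded image
rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)
cells = [cell for row in rows
         for cell in np.hsplit(row, digits_img.shape[1] // SIZE_IMAGE)]
print(len(cells), cells[0].shape)                    # 6 (2, 2)
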
Example #10
Source File: ds_utils.py    From refinedet.pytorch with MIT License
def bbox_overlaps(bboxes, ref_bboxes):
    """
    ref_bboxes: N x 4;
    bboxes: K x 4

    return: K x N
    """
    refx1, refy1, refx2, refy2 = np.vsplit(np.transpose(ref_bboxes), 4)
    x1, y1, x2, y2 = np.hsplit(bboxes, 4)
    
    minx = np.maximum(refx1, x1)
    miny = np.maximum(refy1, y1)
    maxx = np.minimum(refx2, x2)
    maxy = np.minimum(refy2, y2)
    
    inter_area = (maxx - minx + 1) * (maxy - miny + 1)
    ref_area = (refx2 - refx1 + 1) * (refy2 - refy1 + 1)
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    iou = inter_area / (ref_area + area - inter_area)
    
    return iou 
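
A quick sanity check of bbox_overlaps above with two hand-made boxes (coordinates are arbitrary); the hsplit/vsplit pair is what lets the K x 1 and 1 x N coordinate arrays broadcast into a K x N IoU matrix.

import numpy as np

bboxes = np.array([[0., 0., 9., 9.],        # K = 2 query boxes
                   [5., 5., 14., 14.]])
ref_bboxes = np.array([[0., 0., 9., 9.]])   # N = 1 reference box
iou = bbox_overlaps(bboxes, ref_bboxes)
print(iou.shape)   # (2, 1)
# box 1 matches the reference exactly (IoU 1.0); box 2 gives 25 / 175 ≈ 0.143
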
Example #11
Source File: svm_handwritten_digits_recognition_preprocessing_hog_c_gamma.py    From Mastering-OpenCV-4-with-Python with MIT License
def load_digits_and_labels(big_image):
    """ Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels 
Example #12
Source File: knn_handwritten_digits_recognition_introduction.py    From Mastering-OpenCV-4-with-Python with MIT License
def load_digits_and_labels(big_image):
    """Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels 
Example #13
Source File: knn_handwritten_digits_recognition_k_training_testing_preprocessing.py    From Mastering-OpenCV-4-with-Python with MIT License
def load_digits_and_labels(big_image):
    """ Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels 
Example #14
Source File: attention_allocation.py    From ml-fairness-gym with Apache License 2.0
def _sample_incidents(rng, params):
  """Generates new crimeincident occurrences across locations.

  Args:
    rng: A numpy RandomState() object acting as a random number generator.
    params: A Params instance for this environment.

  Returns:
    incidents_occurred: a list of integers of the number of incidents at each
      location that could be discovered by attention.
    reported_incidents: a list of integers of the number of incidents reported
      directly.
  """
  # pylint: disable=g-complex-comprehension
  crimes = [
      rng.poisson([
          params.incident_rates[i] * params.discovered_incident_weight,
          params.incident_rates[i] * params.reported_incident_weight
      ]) for i in range(params.n_locations)
  ]
  incidents_occurred, reported_incidents = np.hsplit(np.asarray(crimes), 2)
  return incidents_occurred.flatten(), reported_incidents.flatten() 
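
The hsplit call above only separates the two Poisson draws per location back into two vectors. A stripped-down re-creation of that step, without the environment's Params object and with made-up rates:

import numpy as np

rng = np.random.RandomState(0)
rates = [2.0, 5.0, 1.0]                                    # hypothetical per-location incident rates
crimes = [rng.poisson([r * 1.0, r * 0.5]) for r in rates]  # (discoverable, reported) per location
occurred, reported = np.hsplit(np.asarray(crimes), 2)      # two (3, 1) columns
print(occurred.flatten(), reported.flatten())
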
Example #15
Source File: knn_handwritten_digits_recognition_k_training_testing_preprocessing_hog.py    From Mastering-OpenCV-4-with-Python with MIT License
def load_digits_and_labels(big_image):
    """ Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels 
Example #16
Source File: space_test.py    From bayesmark with Apache License 2.0
def test_joint_space_warp_missing(args):
    meta, X, _, fixed_vars = args

    S = sp.JointSpace(meta)

    X_w = S.warp([fixed_vars])
    assert X_w.dtype == sp.WARPED_DTYPE

    # Test bounds
    lower, upper = S.get_bounds().T
    assert np.all((lower <= X_w) | np.isnan(X_w))
    assert np.all((X_w <= upper) | np.isnan(X_w))

    for param, xx in zip(S.param_list, np.hsplit(X_w, S.blocks[:-1])):
        xx, = xx
        if param in fixed_vars:
            x_orig = S.spaces[param].unwarp(xx).item()
            S.spaces[param].validate(x_orig)
            assert close_enough(x_orig, fixed_vars[param])

            # check other direction
            x_w2 = S.spaces[param].warp(fixed_vars[param])
            assert close_enough(xx, x_w2)
        else:
            assert np.all(np.isnan(xx)) 
Example #17
Source File: inception_score.py    From BigGAN-TPU-TensorFlow with MIT License
def test_debug(self):
		image = imageio.imread("./temp/dump.png")
		grid_n = 6
		img_size = image.shape[1] // grid_n
		img_ch = image.shape[-1]

		images = np.vsplit(image, grid_n)
		images = [np.hsplit(i, grid_n) for i in images]
		images = np.reshape(np.array(images), [grid_n*grid_n, img_size, img_size, img_ch])

		with tf.Graph().as_default():
			with tf.Session() as sess:
				v_images_placeholder = tf.placeholder(dtype=tf.float32)
				v_images = tf.contrib.gan.eval.preprocess_image(v_images_placeholder)
				v_logits = tf.contrib.gan.eval.run_inception(v_images)
				v_score = tf.contrib.gan.eval.classifier_score_from_logits(v_logits)
				score, logits = sess.run([v_score, v_logits], feed_dict={v_images_placeholder:images})


		imageio.imwrite("./temp/inception_logits.png", logits) 
Example #18
Source File: knn_handwritten_digits_recognition_k_training_testing.py    From Mastering-OpenCV-4-with-Python with MIT License
def load_digits_and_labels(big_image):
    """Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels 
Example #19
Source File: LST.py    From python-urbanPlanning with MIT License
def trainBlock(self,array,row,col):
        arrayShape=array.shape
        print(arrayShape)
        rowPara=divmod(arrayShape[1],row)  # divmod(a, b) returns the integer quotient and the remainder of a divided by b
        colPara=divmod(arrayShape[0],col)
        extractArray=array[:colPara[0]*col,:rowPara[0]*row]  # trim the surplus rows/columns so the array splits evenly
    #    print(extractArray.shape)
        hsplitArray=np.hsplit(extractArray,rowPara[0])
        flatten_lst=lambda lst: [m for n_lst in lst for m in flatten_lst(n_lst)] if type(lst) is list else [lst]
        vsplitArray=flatten_lst([np.vsplit(subArray,colPara[0]) for subArray in hsplitArray])
        dataBlock=flatten_lst(vsplitArray)
        print("样本量:%s"%(len(dataBlock)))  #此时切分的块数据量,就为样本数据量
        
        '''Display one of the sample blocks for inspection'''
        subShow=dataBlock[-2]
        print(subShow,'\n',subShow.max(),subShow.std())
        fig=plt.figure(figsize=(20, 12))
        ax=fig.add_subplot(111)
        plt.xticks([x for x in range(subShow.shape[0]) if x%400==0])
        plt.yticks([y for y in range(subShow.shape[1]) if y%200==0])
        ax.imshow(subShow)    
        
        dataBlockStack=np.append(dataBlock[:-1],[dataBlock[-1]],axis=0)  # convert the list of blocks into an array
        print(dataBlockStack.shape)
        return dataBlockStack    
    
# Main program: data preparation / preprocessing
Example #20
Source File: digits.py    From PyCV-time with MIT License
def split2d(img, cell_size, flatten=True):
    h, w = img.shape[:2]
    sx, sy = cell_size
    cells = [np.hsplit(row, w//sx) for row in np.vsplit(img, h//sy)]
    cells = np.array(cells)
    if flatten:
        cells = cells.reshape(-1, sy, sx)
    return cells 
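
Assuming the split2d helper above is in scope, a hypothetical 4x6 image with 2x2 cells yields one flattened cell per grid position:

import numpy as np

img = np.arange(4 * 6, dtype=np.uint8).reshape(4, 6)   # stand-in for a loaded image
cells = split2d(img, (2, 2))
print(cells.shape)   # (6, 2, 2)
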
Example #21
Source File: RunEVDeblurNet.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def GenerateBatch(IBuffer, PatchSize):
    """
    Inputs: 
    DirNames - Full path to all image files without extension
    NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
    TrainLabels - Labels corresponding to Train
    NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
    ImageSize - Size of the Image
    MiniBatchSize is the size of the MiniBatch
    Outputs:
    I1Batch - Batch of I1 images after standardization and cropping/resizing to ImageSize
    HomeVecBatch - Batch of Homing Vector labels
    """
    IBatch = []

    # Generate random image
    if(np.shape(IBuffer)[1]>=246):
        IBuffer = np.hsplit(IBuffer, 2)
        I = IBuffer[0]
    else:
        I = IBuffer
    
    # Homography and Patch generation 
    IPatch = I
    # IOriginal, IPatch, AllPts, Mask = GenerateRandPatch(I, PatchSize, Vis=False)
    
    # Normalize Dataset
    # https://stackoverflow.com/questions/42275815/should-i-substract-imagenet-pretrained-inception-v3-model-mean-value-at-inceptio
    IS = iu.StandardizeInputs(np.float32(IPatch))
    
    # Append All Images and Mask
    IBatch.append(IS)

    # IBatch is the Original Image I1 Batch
    return IBatch 
Example #22
Source File: test_mesh.py    From geoist with MIT License
def test_z_split_y():
    "model.split along y vs numpy.hsplit splits the z array correctly"
    area = [-1000., 1000., -2000., 0.]
    shape = (20, 21)
    xp, yp = gridder.regular(area, shape)
    zp = 100*np.arange(xp.size)
    model = PointGrid(area, zp, shape)
    subshape = (1, 3)
    submodels = model.split(subshape)
    temp = np.hsplit(np.reshape(zp, shape), subshape[1])
    diff = []
    for i in range(subshape[1]):
        diff.append(np.all((submodels[i].z - temp[i].ravel()) == 0.))
    assert np.alltrue(diff) 
Example #23
Source File: RunEVSegNet.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def GenerateBatch(IBuffer, PatchSize):
    """
    Inputs: 
    DirNames - Full path to all image files without extension
    NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
    TrainLabels - Labels corresponding to Train
    NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
    ImageSize - Size of the Image
    MiniBatchSize is the size of the MiniBatch
    Outputs:
    I1Batch - Batch of I1 images after standardization and cropping/resizing to ImageSize
    HomeVecBatch - Batch of Homing Vector labels
    """
    IBatch = []

    # Generate random image
    IBuffer = np.hsplit(IBuffer, 2)
    I1 = IBuffer[0]
    I2 = IBuffer[1]
    # I = IBuffer

    # Homography and Patch generation 
    IPatch = np.dstack((I1, I2))
    # IOriginal, IPatch, AllPts, Mask = GenerateRandPatch(I, PatchSize, Vis=False)
    
    # Normalize Dataset
    # https://stackoverflow.com/questions/42275815/should-i-substract-imagenet-pretrained-inception-v3-model-mean-value-at-inceptio
    IS = iu.StandardizeInputs(np.float32(IPatch))
    
    # Append All Images and Mask
    IBatch.append(IS)

    # IBatch is the Original Image I1 Batch
    return IBatch 
Example #24
Source File: cut.py    From ustc-grade-automatic-notification with GNU Affero General Public License v3.0
def cut(filename):
    img = cv2.imread(filename)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    final = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)[1]
    cells = np.hsplit(final, 4)
    for i in range(4):
        cv2.imwrite(filename.split('.')[0] + str(i) + '.jpg', cells[i]) 
Example #25
Source File: gather.py    From ustc-grade-automatic-notification with GNU Affero General Public License v3.0
def downpic(filename):
    r = requests.get('http://mis.teach.ustc.edu.cn/randomImage.do')
    img_array = np.asarray(bytearray(r.content), dtype=np.uint8)
    img = cv2.imdecode(img_array, -1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    final = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)[1]
    cells = np.hsplit(final, 4)
    for i in range(4):
        cv2.imwrite(str(filename+i)+'.jpg', cells[i]) 
Example #26
Source File: newknn.py    From ustc-grade-automatic-notification with GNU Affero General Public License v3.0
def hack(self, img):
        test_img_array = np.asarray(bytearray(img), dtype=np.uint8)
        test_img = cv2.imdecode(test_img_array, -1)
        test_gray = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
        test_final = cv2.threshold(test_gray, 100, 255, cv2.THRESH_BINARY)[1]
        test_cells = np.array([i.reshape(-1).astype(np.float32)
                            for i in np.hsplit(test_final, 4)])
        ret, result, neighbours, dist = self.knn.find_nearest(test_cells, k=1)
        result = result.reshape(-1)
        letter = []
        for i in result:
            letter.append(chr(i))
        return ''.join(letter) 
Example #27
Source File: von_mises_stress.py    From fenics-topopt with MIT License
def calculate_principle_stresses(self, x, u, nu, side=1):
        """
        Calculate the principal stresses in the x, y, and shear directions.
        """
        rho = self.penalized_densities(x)
        EB = self.E(nu).dot(self.B(side))
        stress = sum([EB.dot(u[:, i][self.edofMat]) for i in range(u.shape[1])])
        stress *= rho / float(u.shape[1])
        return numpy.hsplit(stress.T, 3) 
Example #28
Source File: array.py    From dislib with Apache License 2.0
def _split_block(block, tl_shape, reg_shape, out_blocks):
    """ Splits a block into new blocks following the ds-array typical scheme
    with a top left block, regular blocks in the middle and remainder blocks
    at the edges """
    vsplit = range(tl_shape[0], block.shape[0], reg_shape[0])
    hsplit = range(tl_shape[1], block.shape[1], reg_shape[1])

    for i, rows in enumerate(np.vsplit(block, vsplit)):
        for j, cols in enumerate(np.hsplit(rows, hsplit)):
            out_blocks[i][j] = cols 
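
Calling the helper above directly on an in-memory NumPy array, a 5x5 block with a 1x1 top-left corner and 2x2 regular blocks fills a 3x3 grid of sub-blocks; the sizes are invented for illustration.

import numpy as np

block = np.arange(25).reshape(5, 5)
tl_shape, reg_shape = (1, 1), (2, 2)
out_blocks = [[None] * 3 for _ in range(3)]   # 1 top-left split + 2 regular splits per axis
_split_block(block, tl_shape, reg_shape, out_blocks)
print(out_blocks[0][0].shape, out_blocks[1][1].shape, out_blocks[2][2].shape)
# (1, 1) (2, 2) (2, 2)
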
Example #29
Source File: powdersim.py    From scikit-ued with MIT License
def powdersim(crystal, q, fwhm_g=0.03, fwhm_l=0.06, **kwargs):
    """
    Simulates polycrystalline diffraction pattern.

    Parameters
    ----------
    crystal : `skued.structure.Crystal`
        Crystal from which to diffract.
    q : `~numpy.ndarray`, shape (N,)
        Range of scattering vector norm over which to compute the diffraction pattern [1/Angs].
    fwhm_g, fwhm_l : float, optional
        Full-width at half-max of the Gaussian and Lorentzian parts of the Voigt profile.
        See `skued.pseudo_voigt` for more details.

    Returns
    -------
    pattern : `~numpy.ndarray`, shape (N,)
        Diffraction pattern
    """
    refls = np.vstack(tuple(crystal.bounded_reflections(q.max())))
    h, k, l = np.hsplit(refls, 3)
    Gx, Gy, Gz = change_basis_mesh(
        h, k, l, basis1=crystal.reciprocal_vectors, basis2=np.eye(3)
    )
    qs = np.sqrt(Gx ** 2 + Gy ** 2 + Gz ** 2)
    intensities = np.absolute(structure_factor(crystal, h, k, l)) ** 2

    pattern = np.zeros_like(q)
    for qi, i in zip(qs, intensities):
        pattern += i * pseudo_voigt(q, qi, fwhm_g, fwhm_l)

    return pattern 
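
The hsplit call in powdersim just turns the (N, 3) table of reflections into three column vectors of Miller indices. A tiny standalone sketch with made-up reflections:

import numpy as np

refls = np.array([[1, 0, 0],
                  [1, 1, 0],
                  [2, 0, 1]])      # hypothetical (h, k, l) rows
h, k, l = np.hsplit(refls, 3)
print(h.shape)                     # (3, 1) -- column vectors, not flat arrays
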
Example #30
Source File: digits.py    From PyCV-time with MIT License
def split2d(img, cell_size, flatten=True):
    h, w = img.shape[:2]
    sx, sy = cell_size
    cells = [np.hsplit(row, w//sx) for row in np.vsplit(img, h//sy)]
    cells = np.array(cells)
    if flatten:
        cells = cells.reshape(-1, sy, sx)
    return cells