Python read obj

41 Python code examples related to "read obj" are listed below. Follow the link above each example to go to the original project or source file.
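For orientation, most of the examples below follow the same basic pattern: scan the file line by line, collect 'v' lines as vertices and 'f' lines as faces, and convert the 1-based OBJ face indices to 0-based. A minimal, self-contained sketch of that pattern (written for this page, not taken from any of the projects below):

def read_obj_minimal(path):
    """Minimal OBJ reader: returns (vertices, faces) as plain Python lists."""
    vertices, faces = [], []
    with open(path) as fp:
        for line in fp:
            parts = line.split()
            if not parts or parts[0].startswith('#'):
                continue
            if parts[0] == 'v':
                vertices.append([float(x) for x in parts[1:4]])
            elif parts[0] == 'f':
                # keep only the vertex index from each "v/vt/vn" token, 1-based -> 0-based
                faces.append([int(tok.split('/')[0]) - 1 for tok in parts[1:]])
    return vertices, faces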
Example 1
Source File: obj.py    From pytorch_geometric with MIT License
def read_obj(in_file):
    vertices = []
    faces = []

    for k, v in yield_file(in_file):
        if k == 'v':
            vertices.append(v)
        elif k == 'f':
            for i in v:
                faces.append(i)

    if not len(faces) or not len(vertices):
        return None

    pos = torch.tensor(vertices, dtype=torch.float)
    face = torch.tensor(faces, dtype=torch.long).t().contiguous()

    data = Data(pos=pos, face=face)

    return data 
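Example 1 also depends on a yield_file helper (plus torch and torch_geometric.data.Data imports) that is not shown above. A plausible sketch of such a helper, stated here as an assumption about its behaviour rather than the project's actual code:

def yield_file(in_file):
    # assumed helper: yield ('v', [x, y, z]) for vertex lines and
    # ('f', [[i, j, k]]) for face lines, with indices converted to 0-based
    with open(in_file) as f:
        for line in f:
            if line.startswith('v '):
                yield 'v', [float(x) for x in line.split()[1:4]]
            elif line.startswith('f '):
                tokens = line.split()[1:]
                yield 'f', [[int(t.split('/')[0]) - 1 for t in tokens]]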
Example 2
Source File: common.py    From OpenBookQA with Apache License 2.0
def read_cn5_concat_subj_rel_obj_from_json(input_file):
    """
    Reads conceptnet json and returns simple json only with text property that contains clean surfaceText.
    :param input_file: conceptnet json file
    :return: list of items with "text" key.
    """

    def mask_rel(rel):
        return "@{0}@".format(rel.replace("/r/", "cn_"))

    items = []
    for l_id, line in enumerate(open(input_file, mode="r")):
        item = json.loads(line.strip())

        text = " ".join([item["surfaceStart"], mask_rel(item["rel"]), item["surfaceEnd"]])
        items.append({"text": text})

    return items 
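To see what the concatenation produces, here is a hypothetical input line and the resulting text (the ConceptNet field values are illustrative):

import json
item = json.loads('{"surfaceStart": "a dog", "rel": "/r/IsA", "surfaceEnd": "an animal"}')
text = " ".join([item["surfaceStart"], "@{0}@".format(item["rel"].replace("/r/", "cn_")), item["surfaceEnd"]])
print(text)  # a dog @cn_IsA@ an animal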
Example 3
Source File: elf_basic.py    From xom-switch with GNU General Public License v2.0
def read_obj_reloctable(self, objfile):
        rreloc = re.compile(r'^\s*([0-9a-fA-F]+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)')
        cmdreloc = "readelf -r -W %s" % objfile
        reloc_table = dict()
        with os.popen(cmdreloc) as file:
            for line in file:
                line = line.strip()
                m = rreloc.match(line)
                if(m != None):
                    sym = dict()
                    sym['offset'] = int(m.group(1), 16)
                    sym['info'] = m.group(2)
                    sym['type'] = m.group(3)
                    sym['value'] = m.group(4)
                    sym['name'] = m.group(5)
                    reloc_table[sym['offset']] = sym
        print "relocation table len: %d\n" % len(reloc_table)
        return reloc_table 
Example 4
Source File: windows_utils.py    From mmvt with GNU General Public License v3.0
def readStringObj(self, scContent, position):
        """
        returns:
            tuple: (newPosition, string)
        """
        strg = ''
        size = struct.unpack('H', scContent[position:position+2])[0]
        #logger.debug("workingDirSize={}".format(size))
        if (self._lnkFlags & 0x80): # unicode
            size *= 2
            strg = struct.unpack(str(size)+'s', scContent[position+2:position+2+size])[0]
            strg = strg.decode('utf-16')
        else:
            strg = struct.unpack(str(size)+'s', scContent[position+2:position+2+size])[0].decode('ascii')
        #logger.debug("strg='{}'".format(strg))
        position += size + 2 # 2 bytes to account for CountCharacters field

        return (position, strg) 
Example 5
Source File: file_ops.py    From question-classification with MIT License
def read_obj(file_key, rp):
    """
    :argument:
        :param file_key: A string which represents the raw data file, in properties.conf,
                          used for the process (experiment).
        :param rp: Absolute path of the root directory of the project
        # Note: File should be using UTF-8 encoding. Change the encoding as needed.
    :exception:
        :except IOError: This may occur because of many reasons. e.g file is missing or corrupt file or wrong file path
    :return:
        boolean_flag: True for successful read operation.
        file: TextIOWrapper for the file corresponding to the `file_name` key in properties.conf
    """
    try:
        file = open(read_key(file_key, rp), "rb")
        obj = pickle.load(file)
        return True, obj
    except IOError as e:
        print("File IO Error :: Cannot open " + read_key(file_key, rp) + "\n" + str(e))
        return False 
Example 6
Source File: datatypeclass.py    From open-context-py with GNU General Public License v3.0
def read_dict_obj(self, dict_obj):
        """ reads a dict object for the class instance """
        ok = False
        if isinstance(dict_obj, dict):
            try:
                ok = True
                self.id = dict_obj['id']
                self.label = dict_obj['label']
                self.data_type = dict_obj['data_type']
                self.total_count = dict_obj['total_count']
                self.datetime_count = dict_obj['datetime_count']
                self.int_count = dict_obj['int_count']
                self.float_count = dict_obj['float_count']
                self.boolean_count = dict_obj['boolean_count']
                self.uniqe_str_hashes = dict_obj['uniqe_str_hashes']
            except:
                print('Something bad happened!')
                ok = False
        return ok 
Example 7
Source File: readOBJ.py    From BlenderToolbox with Apache License 2.0
def readOBJ(filePath, location, rotation_euler, scale):
	x = rotation_euler[0] * 1.0 / 180.0 * np.pi 
	y = rotation_euler[1] * 1.0 / 180.0 * np.pi 
	z = rotation_euler[2] * 1.0 / 180.0 * np.pi 
	angle = (x,y,z)

	prev = []
	for ii in range(len(list(bpy.data.objects))):
		prev.append(bpy.data.objects[ii].name)
	bpy.ops.import_scene.obj(filepath=filePath, split_mode='OFF')
	after = []
	for ii in range(len(list(bpy.data.objects))):
		after.append(bpy.data.objects[ii].name)
	name = list(set(after) - set(prev))[0]
	mesh = bpy.data.objects[name]

	mesh.location = location
	mesh.rotation_euler = angle
	mesh.scale = scale
	bpy.context.view_layer.update()

	return mesh 
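A hypothetical call from Blender's Python console (the .obj path is illustrative); rotation_euler is given in degrees and converted to radians inside the function:

mesh = readOBJ('/tmp/bunny.obj',
               location=(0.0, 0.0, 0.0),
               rotation_euler=(90.0, 0.0, 0.0),
               scale=(1.0, 1.0, 1.0))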
Example 8
Source File: base.py    From seafobj with Apache License 2.0
def read_obj(self, repo_id, version, obj_id):
        try:
            data = self.read_obj_raw(repo_id, version, obj_id)
            if self.crypto:
                data = self.crypto.dec_data(data)
            if self.compressed and version == 1:
                data = zlib.decompress(data)
        except Exception as e:
            raise GetObjectError('Failed to read object %s/%s: %s' % (repo_id, obj_id, e))

        return data 
Example 9
Source File: obj.py    From DEODR with BSD 2-Clause "Simplified" License
def read_obj(filename):

    faces = []
    vertices = []
    fid = open(filename, "r")
    node_counter = 0
    while True:

        line = fid.readline()
        if line == "":
            break
        while line.endswith("\\"):
            # Remove backslash and concatenate with next line
            line = line[:-1] + fid.readline()
        if line.startswith("v"):
            coord = line.split()
            coord.pop(0)
            node_counter += 1
            vertices.append(np.array([float(c) for c in coord]))

        elif line.startswith("f "):
            fields = line.split()
            fields.pop(0)

            # in some obj faces are defined as -70//-70 -69//-69 -62//-62
            cleaned_fields = []
            for f in fields:
                f = int(f.split("/")[0]) - 1
                if f < 0:
                    f = node_counter + f
                cleaned_fields.append(f)
            faces.append(np.array(cleaned_fields))

    faces = np.row_stack(faces)
    vertices = np.row_stack(vertices)
    return faces, vertices 
Example 10
Source File: file.py    From DeepEHR with MIT License
def read_obj (filename, logout = True):
	try:
		data = pickle.load (open(filename, 'rb'))
		return data
	except Exception as e:
		if logout is True:
			log.error(e)
		return None
	
	
### write operations ###

# write data in format of [(x1,y1,z1),(x2,y2,z2)] to a csv file 
Example 11
Source File: object.py    From darkc0de-old-stuff with GNU General Public License v3.0
def read_obj(addr_space, types, member_list, vaddr):
    """
    Read the low-level value for some complex type's member.
    The type must have members.
    """
    if len(member_list) < 2:
        raise Exception('Invalid type/member ' + str(member_list))
    

    
    (offset, current_type) = get_obj_offset(types, member_list)
    return read_value(addr_space, current_type, vaddr + offset) 
Example 12
Source File: stl_slicer.py    From fluxclient with GNU Affero General Public License v3.0
def read_obj(cls, file_data):
        if type(file_data) == str:
            with open(file_data, 'rb') as f:
                file_data = f.read()
        elif type(file_data) == bytes:
            pass
        else:
            raise ValueError('wrong stl data type: %s' % str(type(file_data)))

        in_obj = StringIO(file_data.decode('utf8'))
        points_list = []
        faces = []
        while True:
            t = read_until(in_obj)
            if len(t) == 0:
                break
            elif t.startswith('#'):  # comment
                continue
            t = t.split()
            if t[0] == 'v':
                points_list.append(tuple(float(t[j]) for j in range(1, 4)))
            elif t[0] == 'f':
                t = [i.split('/') for i in t]
                faces.append([int(t[j][0]) for j in range(1, 4)])
            else:
                pass
        for i in range(len(faces)):
            for j in range(3):
                if faces[i][j] > 0:
                    faces[i][j] -= 1
                else:
                    faces[i][j] = len(points_list) + faces[i][j]

        logger.debug("Faces[0] type %s ", type(faces).__name__)
        return _printer.MeshCloud(points_list), faces 
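This example depends on a read_until helper and a _printer module that are not shown. A plausible sketch of read_until, assuming it simply returns the next non-blank line from the StringIO and an empty string at end of input:

def read_until(f):
    # assumed behaviour: return the next non-blank line stripped of whitespace;
    # return '' only when the end of the input has been reached
    while True:
        line = f.readline()
        if line == '':
            return ''
        line = line.strip()
        if line:
            return line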
Example 13
Source File: ioUtils.py    From director with BSD 3-Clause "New" or "Revised" License
def readObjMtl(filename):
    '''
    Read an obj file and return a list of vtkPolyData objects.
    If the obj file has an associated material file, this function returns
    (polyDataList, actors).  If there is no material file, this function
    returns (polyDataList, None).
    '''

    def getMtlFilename(filename, maxLines=1000):
        with open(filename) as f:
            for i, l in enumerate(f):
                if l.startswith('mtllib'):
                    tokens = l.split()
                    if len(tokens) < 2:
                        raise Exception('Error parsing mtllib line in file: %s\n%s' % (filename, l))
                    return os.path.join(os.path.dirname(filename), tokens[1])

    mtlFilename = getMtlFilename(filename)

    l = vtk.vtkOBJImporter()
    l.SetFileName(filename)
    if mtlFilename:
        l.SetFileNameMTL(mtlFilename)
    l.SetTexturePath(os.path.dirname(filename))
    l.Read()
    w = l.GetRenderWindow()
    ren = w.GetRenderers().GetItemAsObject(0)
    actors = ren.GetActors()
    actors = [actors.GetItemAsObject(i) for i in range(actors.GetNumberOfItems())]
    meshes = [a.GetMapper().GetInput() for a in actors]

    if mtlFilename:
        return (meshes, actors)
    else:
        return (meshes, None) 
Example 14
Source File: base.py    From seafobj with Apache License 2.0
def read_obj_raw(self, repo_id, version, obj_id):
        '''Read the raw content of the object from the backend. Each backend
        subclass should have their own implementation.

        '''
        raise NotImplementedError 
Example 15
Source File: io.py    From soccerontable with BSD 2-Clause "Simplified" License
def read_obj(filename):
    with open(filename) as f:
        content = f.readlines()

        vertex = []
        normal = []
        faces = []
        for i in range(len(content)):
            line = content[i]
            if line[0] == '#':
                continue

            if line[0:2] == 'vn':
                normal_info = line.replace(' \n', '').replace('vn ', '')
            elif line[0:2] == 'v ':
                vertex_info = line.replace(' \n', '').replace('v ', '')
                vertex.append(np.array(vertex_info.split(' ')).astype(float))

            elif line[0:2] == 'f ':
                face_info = line.replace(' \n', '').replace('f ', '')
                faces.append(np.array(face_info.split(' ')).astype(int)-1)

    vertex_data = []
    for i in range(len(vertex)):
        vertex_data.append({'x': vertex[i][0], 'y': vertex[i][1], 'z': vertex[i][2]})

    face_normals = []
    for i in range(len(faces)):
        v0, v1, v2 = vertex[faces[i][0]-1], vertex[faces[i][1]-1], vertex[faces[i][2]-1]
        dir0 = v1 - v0
        dir1 = v2 - v0
        face_normal_ = np.cross(dir0 / np.linalg.norm(dir0), dir1 / np.linalg.norm(dir1))
        face_normals.append(face_normal_)

    return vertex_data, faces, face_normals 
Example 16
Source File: bboxClassHistogram.py    From EAD2019 with MIT License
def read_obj_names(textfile):

    classnames = []

    with open(textfile) as f:
        for line in f:
            line = line.strip('\n')
            if len(line)>0:
                classnames.append(line)

    return np.hstack(classnames) 
Example 17
Source File: utils.py    From mmvt with GNU General Public License v3.0
def read_obj_file(obj_file):
    with open(obj_file, 'r') as f:
        lines = f.readlines()
        verts = np.array([[float(v) for v in l.strip().split(' ')[1:]] for l in lines if l[0]=='v'])
        faces = np.array([[int(v) for v in l.strip().split(' ')[1:]] for l in lines if l[0]=='f'])
    faces -= 1
    return verts, faces 
Example 18
Source File: show_fmri.py    From mmvt with GNU General Public License v3.0
def read_obj(obj_file):
    obj_rh = obj_file
    with open(obj_rh, 'r') as f:
        pos = np.array([map(float, line.strip().split(' ')[1:]) for line in f.readlines() if line.strip().split(' ')[0] == 'v'])
    return pos 
Example 19
Source File: fetching.py    From kopf with MIT License
async def read_obj(
        *,
        resource: resources.Resource,
        namespace: Optional[str] = None,
        name: Optional[str] = None,
        default: Union[_T, _UNSET] = _UNSET.token,
        context: Optional[auth.APIContext] = None,  # injected by the decorator
) -> Union[bodies.RawBody, _T]:
    if context is None:
        raise RuntimeError("API instance is not injected by the decorator.")

    is_namespaced = await discovery.is_namespaced(resource=resource, context=context)
    namespace = namespace if is_namespaced else None

    try:
        response = await context.session.get(
            url=resource.get_url(server=context.server, namespace=namespace, name=name),
        )
        response.raise_for_status()
        respdata = await response.json()
        return cast(bodies.RawBody, respdata)

    except aiohttp.ClientResponseError as e:
        if e.status in [403, 404] and not isinstance(default, _UNSET):
            return default
        raise 
Example 20
Source File: fig_model.py    From uniconvertor with GNU Affero General Public License v3.0
def read_obj(self, loader):
        cid_map = {
            OBJ_COLOR_DEF: FIGColorDef,
            OBJ_ELLIPSE: FIGEllipse,
            OBJ_POLYLINE: FIGPolyline,
            OBJ_SPLINE: FIGSpline,
            OBJ_TEXT: FIGText,
            OBJ_ARC: FIGArc,
            OBJ_COMPOUND: FIGCompound
        }

        while True:
            line = loader.get_line(strip=False)
            if line is None:
                break
            childs = loader.stack[-1]
            cid = int(line.split(' ', 1)[0])
            obj_class = cid_map.get(cid)
            if obj_class:
                obj = obj_class()
                obj.parse(loader, line)
                childs.append(obj)
            if cid == OBJ_COMPOUND:
                loader.stack.append(obj.childs)
            elif cid == OBJ_COMPOUND_ENDS:
                loader.stack.pop() 
Example 21
Source File: elf_basic.py    From xom-switch with GNU General Public License v2.0
def read_obj_symtable(self, objfile):
        rsym = re.compile(r'^(\S+)\s+(\S+)\s+(\S+)\s*$')
        cmdsym = "nm %s" % objfile
        sym_table = dict()
        with os.popen(cmdsym) as file:
            for line in file:
                line = line.strip()
                m = rsym.match(line)
                if(m != None):
                    sym = dict()
                    sym['offset'] = int(m.group(1), 16)
                    sym['type'] = m.group(2)
                    sym['name'] = m.group(3)
                    sym_table[sym['name']] = sym
        return sym_table 
Example 22
Source File: permissions.py    From GloboNetworkAPI with Apache License 2.0
def read_obj_permission(request, *args, **kwargs):

    class Perm(BasePermission):

        def has_permission(self, request, view):
            return perm_obj(
                request,
                AdminPermission.OBJ_READ_OPERATION,
                AdminPermission.OBJ_TYPE_VIP,
                *args,
                **kwargs
            )

    return Perm 
Example 23
Source File: file.py    From Valx with GNU General Public License v3.0
def read_obj (filename, logout = True):
	try:
		data = cPickle.load (open(filename, 'rb'))
		return data
	except Exception as e:
		if logout is True:
			log.error(e)
		return None
	
	
### write operations ###

# write data in format of [(x1,y1,z1),(x2,y2,z2)] to a csv file 
Example 24
Source File: io_mesh.py    From surface_tools with MIT License
def read_obj(file):
    def chunks(l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]
    def indices(lst,element):
        result=[]
        offset = -1
        while True:
            try:
                offset=lst.index(element,offset+1)
            except ValueError:
                return result
            result.append(offset)
    fp=open(file,'r')
    n_vert=[]
    n_poly=[]
    k=0
    Polys=[]
	# Find number of vertices and number of polygons, stored in .obj file.
	#Then extract list of all vertices in polygons
    for i, line in enumerate(fp):
         if i==0:
    	#Number of vertices
             n_vert=int(line.split()[6])
             XYZ=np.zeros([n_vert,3])
         elif i<=n_vert:
             XYZ[i-1]=list(map(float,line.split()))
         elif i>2*n_vert+5:
             if not line.strip():
                 k=1
             elif k==1:
                 Polys.extend(line.split())
    Polys=list(map(int,Polys))
    npPolys=np.array(Polys)
    triangles=np.array(list(chunks(Polys,3)))
    return XYZ, triangles;



# function to save mesh geometry 
Example 25
Source File: WCS.py    From WorstCaseStack with BSD 3-Clause "New" or "Revised" License
def read_obj(tu, call_graph):
    """
    Reads the file tu.o and gets the binding (global or local) for each function
    :param tu: name of the translation unit (e.g. for main.c, this would be 'main')
    :param call_graph: a object used to store information about each function, results go here
    """
    symbols = read_symbols(tu[0:tu.rindex(".")] + obj_ext)

    for s in symbols:

        if s.type == 'FUNC':
            if s.binding == 'GLOBAL':
                # Check for multiple declarations
                if s.name in call_graph['globals'] or s.name in call_graph['locals']:
                    raise Exception('Multiple declarations of {}'.format(s.name))
                call_graph['globals'][s.name] = {'tu': tu, 'name': s.name, 'binding': s.binding}
            elif s.binding == 'LOCAL':
                # Check for multiple declarations
                if s.name in call_graph['locals'] and tu in call_graph['locals'][s.name]:
                    raise Exception('Multiple declarations of {}'.format(s.name))

                if s.name not in call_graph['locals']:
                    call_graph['locals'][s.name] = {}

                call_graph['locals'][s.name][tu] = {'tu': tu, 'name': s.name, 'binding': s.binding}
            elif s.binding == 'WEAK':
                if s.name in call_graph['weak']:
                    raise Exception('Multiple declarations of {}'.format(s.name))
                call_graph['weak'][s.name] = {'tu': tu, 'name': s.name, 'binding': s.binding}
            else:
                raise Exception('Error Unknown Binding "{}" for symbol: {}'.format(s.binding, s.name)) 
Example 26
Source File: common.py    From occupancy_networks with MIT License
def read_obj(file):
    """
    Reads vertices and faces from an obj file.

    :param file: path to file to read
    :type file: str
    :return: vertices and faces as lists of tuples
    :rtype: [(float)], [(int)]
    """

    assert os.path.exists(file), 'file %s not found' % file

    with open(file, 'r') as fp:
        lines = fp.readlines()
        lines = [line.strip() for line in lines if line.strip()]

        vertices = []
        faces = []
        for line in lines:
            parts = line.split(' ')
            parts = [part.strip() for part in parts if part]

            if parts[0] == 'v':
                assert len(parts) == 4, \
                    'vertex should be of the form v x y z, but found %d parts instead (%s)' % (len(parts), file)
                assert parts[1] != '', 'vertex x coordinate is empty (%s)' % file
                assert parts[2] != '', 'vertex y coordinate is empty (%s)' % file
                assert parts[3] != '', 'vertex z coordinate is empty (%s)' % file

                vertices.append([float(parts[1]), float(parts[2]), float(parts[3])])
            elif parts[0] == 'f':
                assert len(parts) == 4, \
                    'face should be of the form f v1/vt1/vn1 v2/vt2/vn2 v2/vt2/vn2, but found %d parts (%s) instead (%s)' % (len(parts), line, file)

                components = parts[1].split('/')
                assert len(components) >= 1 and len(components) <= 3, \
                   'face component should have the forms v, v/vt or v/vt/vn, but found %d components instead (%s)' % (len(components), file)
                assert components[0].strip() != '', \
                    'face component is empty (%s)' % file
                v1 = int(components[0])

                components = parts[2].split('/')
                assert len(components) >= 1 and len(components) <= 3, \
                    'face component should have the forms v, v/vt or v/vt/vn, but found %d components instead (%s)' % (len(components), file)
                assert components[0].strip() != '', \
                    'face component is empty (%s)' % file
                v2 = int(components[0])

                components = parts[3].split('/')
                assert len(components) >= 1 and len(components) <= 3, \
                    'face component should have the forms v, v/vt or v/vt/vn, but found %d components instead (%s)' % (len(components), file)
                assert components[0].strip() != '', \
                    'face component is empty (%s)' % file
                v3 = int(components[0])

                #assert v1 != v2 and v2 != v3 and v3 != v2, 'degenerate face detected: %d %d %d (%s)' % (v1, v2, v3, file)
                if v1 == v2 or v2 == v3 or v1 == v3:
                    print('[Info] skipping degenerate face in %s' % file)
                else:
                    faces.append([v1 - 1, v2 - 1, v3 - 1]) # indices are 1-based!
            else:
                assert False, 'expected either vertex or face but got line: %s (%s)' % (line, file)

        return vertices, faces

    assert False, 'could not open %s' % file 
Example 27
Source File: SettingsManager.py    From moviegrabber with GNU General Public License v3.0
def readSettingsFromConfigFileObj(self, inFile, convert=True):
        """Return the settings from a config file that uses the syntax accepted by
        Python's standard ConfigParser module (like Windows .ini files).

        NOTE:
        this method maintains case unlike the ConfigParser module, unless this
        class was initialized with the 'caseSensitive' keyword set to False.

        All setting values are initially parsed as strings. However, If the
        'convert' arg is True this method will do the following value
        conversions:
        
        * all Python numeric literals will be converted from string to number
        
        * The string 'None' will be converted to the Python value None
        
        * The string 'True' will be converted to a Python truth value
        
        * The string 'False' will be converted to a Python false value
        
        * Any string starting with 'python:' will be treated as a Python literal
          or expression that needs to be eval'd. This approach is useful for
          declaring lists and dictionaries.

        If a config section titled 'Globals' is present the options defined
        under it will be treated as top-level settings.        
        """
        
        p = self._ConfigParserClass()
        p.readfp(inFile)
        sects = p.sections()
        newSettings = {}

        sects = p.sections()
        newSettings = {}
        
        for s in sects:
            newSettings[s] = {}
            for o in p.options(s):
                if o != '__name__':
                    newSettings[s][o] = p.get(s, o)

        ## loop through new settings -> deal with global settings, numbers,
        ## booleans and None ++ also deal with 'importSettings' commands

        for sect, subDict in newSettings.items():
            for key, val in subDict.items():
                if convert:
                    if val.lower().startswith('python:'):
                        subDict[key] = eval(val[7:], {}, {})
                    if val.lower() == 'none':
                        subDict[key] = None
                    if val.lower() == 'true':
                        subDict[key] = True
                    if val.lower() == 'false':
                        subDict[key] = False
                    if stringIsNumber(val):
                        subDict[key] = convStringToNum(val)
                        
                ## now deal with any 'importSettings' commands
                if key.lower() == 'importsettings':
                    if val.find(';') < 0:
                        importedSettings = self.readSettingsFromPySrcFile(val)
                    else:
                        path = val.split(';')[0]
                        rest = ''.join(val.split(';')[1:]).strip()
                        parentDict = self.readSettingsFromPySrcFile(path)
                        importedSettings = eval('parentDict["' + rest + '"]')
                        
                    subDict.update(mergeNestedDictionaries(subDict,
                                                           importedSettings))
                        
            if sect.lower() == 'globals':
                newSettings.update(newSettings[sect])
                del newSettings[sect]
                
        return newSettings 
Example 28
Source File: torchfile.py    From tensorflow-litterbox with Apache License 2.0
def read_obj(self):
        typeidx = self.read_int()
        if typeidx == TYPE_NIL:
            return None
        elif typeidx == TYPE_NUMBER:
            x = self.read_double()
            # Extra checking for integral numbers:
            if self.use_int_heuristic and x.is_integer():
                return int(x)
            return x
        elif typeidx == TYPE_BOOLEAN:
            return self.read_boolean()
        elif typeidx == TYPE_STRING:
            return self.read_string()
        elif (typeidx == TYPE_TABLE or typeidx == TYPE_TORCH
                or typeidx == TYPE_FUNCTION or typeidx == TYPE_RECUR_FUNCTION
                or typeidx == LEGACY_TYPE_RECUR_FUNCTION):
            # read the index
            index = self.read_int()

            # check it is loaded already
            if index in self.objects:
                return self.objects[index]

            # otherwise read it
            if (typeidx == TYPE_FUNCTION or typeidx == TYPE_RECUR_FUNCTION
                    or typeidx == LEGACY_TYPE_RECUR_FUNCTION):
                size = self.read_int()
                dumped = self.f.read(size)
                upvalues = self.read_obj()
                obj = LuaFunction(size, dumped, upvalues)
                self.objects[index] = obj
                return obj
            elif typeidx == TYPE_TORCH:
                version = self.read_string()
                if version.startswith('V '):
                    versionNumber = int(version.partition(' ')[2])
                    className = self.read_string()
                else:
                    className = version
                    versionNumber = 0  # created before existence of versioning
                if className not in torch_readers:
                    if not self.force_deserialize_classes:
                        raise T7ReaderException(
                            'unsupported torch class: <%s>' % className)
                    obj = TorchObject(className, self.read_obj())
                else:
                    obj = torch_readers[className](self, version)
                self.objects[index] = obj
                return obj
            else:  # it is a table: returns a custom dict or a list
                size = self.read_int()
                obj = hashable_uniq_dict()  # custom hashable dict, can be a key
                key_sum = 0                # for checking if keys are consecutive
                keys_natural = True        # and also natural numbers 1..n.
                # If so, returns a list with indices converted to 0-indices.
                for i in range(size):
                    k = self.read_obj()
                    v = self.read_obj()
                    obj[k] = v

                    if self.use_list_heuristic:
                        if not isinstance(k, int) or k <= 0:
                            keys_natural = False
                        elif isinstance(k, int):
                            key_sum += k
                if self.use_list_heuristic:
                    # n(n+1)/2 = sum <=> consecutive and natural numbers
                    n = len(obj)
                    if keys_natural and n * (n + 1) == 2 * key_sum:
                        lst = []
                        for i in range(len(obj)):
                            lst.append(obj[i + 1])
                        obj = lst
                self.objects[index] = obj
                return obj
        else:
            raise T7ReaderException("unknown object") 
Example 29
Source File: B28_importOBJ_principledShader.py    From blender_addons with GNU General Public License v3.0
def readObj(objfile):
    #objfile = 'H:/01_Projects/Thomas_Projects/3dcoat/Flink_Dose/Export02/Dose02.obj'
    dirname = os.path.dirname(objfile)
    with open(objfile,'r') as f:
        objlines = f.read().splitlines()

    objs = {}
    objname = ''
    for line in objlines:
        if line:
            split_line = line.strip().split(' ', 1)
            if len(split_line) < 2:
                continue

            prefix, value = split_line[0], split_line[1]
            if prefix == 'g':
                objname = value
                objs[objname] = {'name' : value}
            # For files without an 'o' statement
            elif prefix == 'usemtl':
                objs[objname]['matname'] = value
    
    with open(objfile.replace('.obj', '.mtl'), 'r') as f:
        mtllines = f.read().splitlines()

    materials = {}
    matname = ''
    for line in mtllines:
        if line:
            split_line = line.strip().split(' ', 1)
            if len(split_line) < 2:
                continue

            prefix, data = split_line[0], split_line[1]
            if 'newmtl' in prefix:
                matname = data
                materials[matname] = {}
            elif prefix == 'map_Kd':
                materials[matname]['BaseColor'] = dirname + '\\' + data
            elif prefix == 'map_Ks':
                materials[matname]['Roughness'] = dirname + '\\' + data
            elif prefix == 'bump':
                materials[matname]['Displacement'] = dirname + '\\' + split_line[-1].split('\\')[-1]
                try:
                    materials[matname]['displ_value'] = split_line[-1].split(' ')[-2]
                except:
                    materials[matname]['displ_value'] = 0
            elif prefix == 'map_bump':
                materials[matname]['Normal'] = dirname + '\\' + data
    print(dirname)
    for matname in materials.keys():
        for i in os.listdir(dirname):
            for n in [matname, os.path.basename(objfile).split('.')[0]]:
                if n+'_ao' in i:
                    materials[matname]['ao'] = dirname + '\\' + i
                if n+'_EmissiveColor' in i:
                    materials[matname]['EmissiveColor'] = dirname + '\\' + i
                if n+'_Metalness' in i:
                    materials[matname]['Metalness'] = dirname + '\\' + i
                if n+'_Opacity' in i:
                    materials[matname]['Opacity'] = dirname + '\\' + i
    #print(materials)
    return objs, materials 
Example 30
Source File: model3d.py    From Beginning-Game-Development-with-Python-and-Pygame with MIT License
def read_obj(self, fname):

        current_face_group = None

        file_in = open(fname)

        for line in file_in:

            # Parse command and data from each line
            words = line.split()
            command = words[0]
            data = words[1:]

            if command == 'mtllib': # Material library

                model_path = os.path.split(fname)[0]
                mtllib_path = os.path.join( model_path, data[0] )                                
                self.read_mtllib(mtllib_path)
                
            elif command == 'v': # Vertex
                x, y, z = data
                vertex = (float(x), float(y), float(z))
                self.vertices.append(vertex)

            elif command == 'vt': # Texture coordinate

                s, t = data
                tex_coord = (float(s), float(t))
                self.tex_coords.append(tex_coord)

            elif command == 'vn': # Normal

                x, y, z = data
                normal = (float(x), float(y), float(z))
                self.normals.append(normal)

            elif command == 'usemtl' : # Use material

                current_face_group = FaceGroup()
                current_face_group.material_name = data[0]
                self.face_groups.append( current_face_group )

            elif command == 'f':

                assert len(data) ==  3, "Sorry, only triangles are supported"

                # Parse indices from triples
                for word in data:
                    vi, ti, ni = word.split('/')
                    indices = (int(vi) - 1, int(ti) - 1, int(ni) - 1)
                    current_face_group.tri_indices.append(indices)


        for material in self.materials.values():

            model_path = os.path.split(fname)[0]
            texture_path = os.path.join(model_path, material.texture_fname)
            texture_surface = pygame.image.load(texture_path)
            texture_data = pygame.image.tostring(texture_surface, 'RGB', True)

            material.texture_id = glGenTextures(1)
            glBindTexture(GL_TEXTURE_2D, material.texture_id)

            glTexParameteri( GL_TEXTURE_2D,
                             GL_TEXTURE_MAG_FILTER,
                             GL_LINEAR)
            glTexParameteri( GL_TEXTURE_2D,
                             GL_TEXTURE_MIN_FILTER,
                             GL_LINEAR_MIPMAP_LINEAR)

            glPixelStorei(GL_UNPACK_ALIGNMENT,1)
            width, height = texture_surface.get_rect().size
            gluBuild2DMipmaps( GL_TEXTURE_2D,
                               3,
                               width,
                               height,
                               GL_RGB,
                               GL_UNSIGNED_BYTE,
                               texture_data) 
Example 31
Source File: torchfile.py    From gait-recognition with BSD 3-Clause "New" or "Revised" License
def read_obj(self):
        typeidx = self.read_int()
        if typeidx == TYPE_NIL:
            return None
        elif typeidx == TYPE_NUMBER:
            x = self.read_double()
            # Extra checking for integral numbers:
            if self.use_int_heuristic and x.is_integer():
                return int(x)
            return x
        elif typeidx == TYPE_BOOLEAN:
            return self.read_boolean()
        elif typeidx == TYPE_STRING:
            return self.read_string()
        elif (typeidx == TYPE_TABLE or typeidx == TYPE_TORCH
                or typeidx == TYPE_FUNCTION or typeidx == TYPE_RECUR_FUNCTION
                or typeidx == LEGACY_TYPE_RECUR_FUNCTION):
            # read the index
            index = self.read_int()

            # check it is loaded already
            if index in self.objects:
                return self.objects[index]

            # otherwise read it
            if (typeidx == TYPE_FUNCTION or typeidx == TYPE_RECUR_FUNCTION
                    or typeidx == LEGACY_TYPE_RECUR_FUNCTION):
                size = self.read_int()
                dumped = self.f.read(size)
                upvalues = self.read_obj()
                obj = LuaFunction(size, dumped, upvalues)
                self.objects[index] = obj
                return obj
            elif typeidx == TYPE_TORCH:
                version = self.read_string()
                if version.startswith(b'V '):
                    versionNumber = int(version.partition(b' ')[2])
                    className = self.read_string()
                else:
                    className = version
                    versionNumber = 0  # created before existence of versioning
                # print(className)
                if className not in torch_readers:
                    if not self.force_deserialize_classes:
                        raise T7ReaderException(
                            'unsupported torch class: <%s>' % className)
                    obj = TorchObject(className, self.read_obj())
                else:
                    obj = torch_readers[className](self, version)
                self.objects[index] = obj
                return obj
            else:  # it is a table: returns a custom dict or a list
                size = self.read_int()
                obj = hashable_uniq_dict()  # custom hashable dict, can be a key
                key_sum = 0                # for checking if keys are consecutive
                keys_natural = True        # and also natural numbers 1..n.
                # If so, returns a list with indices converted to 0-indices.
                for i in range(size):
                    k = self.read_obj()
                    v = self.read_obj()
                    obj[k] = v

                    if self.use_list_heuristic:
                        if not isinstance(k, int) or k <= 0:
                            keys_natural = False
                        elif isinstance(k, int):
                            key_sum += k
                if self.use_list_heuristic:
                    # n(n+1)/2 = sum <=> consecutive and natural numbers
                    n = len(obj)
                    if keys_natural and n * (n + 1) == 2 * key_sum:
                        lst = []
                        for i in range(len(obj)):
                            lst.append(obj[i + 1])
                        obj = lst
                self.objects[index] = obj
                return obj
        else:
            raise T7ReaderException("unknown object") 
Example 32
Source File: obj.py    From pyntcloud with MIT License
def read_obj(filename):
    """ Reads and obj file and return the elements as pandas Dataframes.

    Parameters
    ----------
    filename: str
        Path to the obj file.

    Returns
    -------
    Each obj element found as pandas Dataframe.

    """
    v = []
    vn = []
    vt = []
    f = []

    with open(filename) as obj:
        for line in obj:
            if line.startswith('v '):
                v.append(line.strip()[1:].split())

            elif line.startswith('vn'):
                vn.append(line.strip()[2:].split())

            elif line.startswith('vt'):
                vt.append(line.strip()[2:].split())

            elif line.startswith('f'):
                f.append(line.strip()[1:].lstrip())

    points = pd.DataFrame(v, dtype='f4', columns=["x", "y", "z", "w"][:len(v[0])])

    if len(vn) > 0:
        points = points.join(pd.DataFrame(vn, dtype='f4', columns=['nx', 'ny', 'nz']))

    if len(vt) > 0:
        points = points.join(pd.DataFrame(vt, dtype='f4', columns=['u', 'v']))

    data = {"points": points}

    if len(f) < 1:
        return data

    mesh_columns = []
    if f[0].count("//") > 0:
        # wikipedia.org/wiki/Wavefront_.obj_file#Vertex_normal_indices_without_texture_coordinate_indices
        for i in range(f[0].count("//")):
            mesh_columns.append("v{}".format(i + 1))
            mesh_columns.append("vn{}".format(i + 1))
    elif f[0].count("/") > 0:
        if len(vn) > 0:
            # wikipedia.org/wiki/Wavefront_.obj_file#Vertex_normal_indices
            for i in range(f[0].count("/") // 2):
                mesh_columns.append("v{}".format(i + 1))
                mesh_columns.append("vt{}".format(i + 1))
                mesh_columns.append("vn{}".format(i + 1))
        else:
            # wikipedia.org/wiki/Wavefront_.obj_file#Vertex_texture_coordinate_indices
            for i in range(f[0].count("/")):
                mesh_columns.append("v{}".format(i + 1))
                mesh_columns.append("vt{}".format(i + 1))
    else:
        # wikipedia.org/wiki/Wavefront_.obj_file#Vertex_indices
        for i in range(sum(c.isdigit() for c in f[0].split(" "))):
            mesh_columns.append("v{}".format(i + 1))

    mesh = pd.DataFrame([re.split(r'\D+', x) for x in f], dtype='i4', columns=mesh_columns).astype('i4')
    mesh -= 1  # index starts with 1 in obj file

    data["mesh"] = mesh

    return data 
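A hypothetical usage of the pyntcloud reader above ("cube.obj" is an illustrative path):

data = read_obj("cube.obj")
points = data["points"]   # pandas DataFrame with x, y, z (plus normals/uv when present)
mesh = data.get("mesh")   # DataFrame of 0-based vertex indices, only present if faces were found
print(points.shape, None if mesh is None else mesh.shape)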
Example 33
Source File: kitti_utils.py    From Stereo-RCNN with MIT License
def read_obj_calibration(CALIB_PATH):
    ''' Reads in Calibration file from Kitti Dataset.
        
        Inputs:
        CALIB_PATH : Str PATH of the calibration file.
        
        Returns:
        frame_calibration_info : FrameCalibrationData
                                Contains a frame's full calibration data.
        ^ z        ^ z                                      ^ z         ^ z
        | cam2     | cam0                                   | cam3      | cam1
        |-----> x  |-----> x                                |-----> x   |-----> x

    '''
    frame_calibration_info = FrameCalibrationData()

    data_file = open(CALIB_PATH, 'r')
    data_reader = csv.reader(data_file, delimiter=' ')
    data = []

    for row in data_reader:
        data.append(row)

    data_file.close()

    p_all = []

    for i in range(4):
        p = data[i]
        p = p[1:]
        p = [float(p[i]) for i in range(len(p))]
        p = np.reshape(p, (3, 4))
        p_all.append(p)

    # based on camera 0
    frame_calibration_info.p0 = p_all[0]
    frame_calibration_info.p1 = p_all[1]
    frame_calibration_info.p2 = p_all[2]
    frame_calibration_info.p3 = p_all[3]
    
    # based on camera 2
    frame_calibration_info.p2_2 = np.copy(p_all[2]) 
    frame_calibration_info.p2_2[0,3] = frame_calibration_info.p2_2[0,3] - frame_calibration_info.p2[0,3]

    frame_calibration_info.p2_3 = np.copy(p_all[3]) 
    frame_calibration_info.p2_3[0,3] = frame_calibration_info.p2_3[0,3] - frame_calibration_info.p2[0,3]

    frame_calibration_info.t_cam2_cam0 = np.zeros(3)
    frame_calibration_info.t_cam2_cam0[0] = (frame_calibration_info.p2[0,3] - frame_calibration_info.p0[0,3])/frame_calibration_info.p2[0,0]

    # Read in rectification matrix
    tr_rect = data[4]
    tr_rect = tr_rect[1:]
    tr_rect = [float(tr_rect[i]) for i in range(len(tr_rect))]
    frame_calibration_info.r0_rect = np.reshape(tr_rect, (3, 3))

    # Read in velodyne to cam matrix
    tr_v2c = data[5]
    tr_v2c = tr_v2c[1:]
    tr_v2c = [float(tr_v2c[i]) for i in range(len(tr_v2c))]
    frame_calibration_info.tr_velodyne_to_cam0 = np.reshape(tr_v2c, (3, 4))

    return frame_calibration_info 
Example 34
Source File: World.py    From ChauffeurNet with MIT License
def read_obj_file(self, path):

        #TODO might need to read the lines between vertices
        #TODO
        #TODO

        with open(path) as file:
            all_lines = file.readlines()
            file.close()

        all_objects = {}
        i = 0
        while i < len(all_lines):
            line = all_lines[i]

            if line[0] == "o":
                object_name = line.split(" ")[-1].replace("\n", "")
                object_vertices = []
                object_lines = []
                i += 1
                while i < len(all_lines) and all_lines[i][0] == "v":
                    object_vertices.append(all_lines[i])
                    i += 1
                while i < len(all_lines) and all_lines[i][0] == "l":
                    object_lines.append(all_lines[i])
                    i +=1
                all_objects[object_name] = {"verts":object_vertices,
                                            "lines":object_lines}
            else:
                i += 1

        for objname in all_objects.keys():
            object_vertices = all_objects[objname]["verts"]
            vertices_numeric = []
            for vertex in object_vertices:
                coords_str = vertex.replace("v ", "").replace("\n", "").split(" ") + ["1.0"]
                coords_numeric = [float(value) for value in coords_str]
                vertices_numeric.append(coords_numeric)
            vertices_numeric = np.array(vertices_numeric).T
            vertices_numeric[:3,:] *= Config.world_scale_factor
            vertices_numeric[0,:] *= Config.scale_x

            all_objects[objname]["verts"] = vertices_numeric

            object_lines = all_objects[objname]["lines"]
            lines_numeric = []
            for line in object_lines:
                lines_str = line.replace("l ", "").replace("\n", "").split(" ")
                lines_inds_numeric = [int(value) for value in lines_str]
                lines_numeric.append(lines_inds_numeric)
            lines_numeric = np.array(lines_numeric)
            lines_numeric -= lines_numeric.min()
            all_objects[objname]["lines"] = lines_numeric

        return all_objects 
Example 35
Source File: serialization.py    From opendr with MIT License
def read_obj(filename):

    obj_directory = split(filename)[0]
    lines = open(filename).read().split('\n')

    d = {'v': [], 'vn': [], 'f': [], 'vt': [], 'ft': []}

    mtls = {}
    for line in lines:
        line = line.split()
        if len(line) < 2:
            continue

        key = line[0]
        values = line[1:]

        if key == 'v':
            d['v'].append([np.array([float(v) for v in values[:3]])])
        elif key == 'f':
            spl = [l.split('/') for l in values]
            d['f'].append([np.array([int(l[0])-1 for l in spl[:3]], dtype=np.uint32)])
            if len(spl[0]) > 1 and spl[1] and 'ft' in d:
                d['ft'].append([np.array([int(l[1])-1 for l in spl[:3]])])

            # TOO: redirect to actual vert normals?
            #if len(line[0]) > 2 and line[0][2]:
            #    d['fn'].append([np.concatenate([l[2] for l in spl[:3]])])
        elif key == 'vn':
            d['vn'].append([np.array([float(v) for v in values])])
        elif key == 'vt':
            d['vt'].append([np.array([float(v) for v in values])])
        elif key == 'mtllib':
            fname = join(obj_directory, values[0])
            if not exists(fname):
                fname = values[0]
            if not exists(fname):
                raise Exception("Can't find path %s" % (values[0]))
            _update_mtl(mtls, fname)
        elif key == 'usemtl':
            cur_mtl = mtls[values[0]]

            if 'map_Kd' in cur_mtl:
                src_fname = cur_mtl['map_Kd'][0]
                dst_fname = join(split(cur_mtl['filename'])[0], src_fname)
                if not exists(dst_fname):
                    dst_fname = join(obj_directory, src_fname)
                if not exists(dst_fname):
                    dst_fname = src_fname
                if not exists(dst_fname):
                    raise Exception("Unable to find referenced texture map %s" % (src_fname,))
                else:
                    d['texture_filepath'] = normpath(dst_fname)
                    im = cv2.imread(dst_fname)
                    sz = np.sqrt(np.prod(im.shape[:2]))
                    sz = int(np.round(2 ** np.ceil(np.log(sz) / np.log(2))))
                    d['texture_image'] = cv2.resize(im, (sz, sz)).astype(np.float64)/255.

    for k, v in list(d.items()):
        if k in ['v','vn','f','vt','ft']:
            if v:
                d[k] = np.vstack(v)
            else:
                del d[k]
        else:
            d[k] = v

    result = Minimal(**d)

    return result 
Example 36
Source File: io3d.py    From connecting_the_dots with MIT License
def read_obj(path):
  with open(path, 'r') as fp:
    lines = fp.readlines()

  verts = []
  colors = []
  fnorms = []
  fnorm_map = collections.defaultdict(list)
  faces = []
  for line in lines:
    line = line.strip()
    if line.startswith('#') or len(line) == 0:
      continue

    parts = line.split()
    if line.startswith('v '):
      parts = parts[1:]
      x,y,z = float(parts[0]), float(parts[1]), float(parts[2])
      if len(parts) == 4 or len(parts) == 7:
        w = float(parts[3])
        x,y,z = x/w, y/w, z/w
      verts.append((x,y,z))
      if len(parts) >= 6:
        r,g,b = float(parts[-3]), float(parts[-2]), float(parts[-1])
        colors.append((r,g,b))

    elif line.startswith('vn '):
      parts = parts[1:]
      x,y,z = float(parts[0]), float(parts[1]), float(parts[2])
      fnorms.append((x,y,z))

    elif line.startswith('f '):
      parts = parts[1:]
      if len(parts) != 3:
        raise Exception('only triangle meshes supported atm')
      vidx0, tidx0, nidx0 = _read_obj_split_f(parts[0])
      vidx1, tidx1, nidx1 = _read_obj_split_f(parts[1])
      vidx2, tidx2, nidx2 = _read_obj_split_f(parts[2])

      faces.append((vidx0, vidx1, vidx2))
      if nidx0 >= 0:
        fnorm_map[vidx0].append( nidx0 )
      if nidx1 >= 0:
        fnorm_map[vidx1].append( nidx1 )
      if nidx2 >= 0:
        fnorm_map[vidx2].append( nidx2 )

  verts = np.array(verts)
  colors = np.array(colors)
  fnorms = np.array(fnorms)
  faces = np.array(faces)
  
  # face normals to vertex normals
  norms = np.zeros_like(verts)
  for vidx in fnorm_map.keys():
    ind = fnorm_map[vidx]
    norms[vidx] = fnorms[ind].sum(axis=0)
  N = np.linalg.norm(norms, axis=1, keepdims=True)
  np.divide(norms, N, out=norms, where=N != 0)

  return verts, faces, colors, norms
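This last example calls a _read_obj_split_f helper that is not shown. Judging from how its results are used, it appears to split a face token such as "v/vt/vn" into 0-based indices, with -1 marking a missing entry; a sketch under that assumption (not the project's actual code):

def _read_obj_split_f(token):
    # assumed helper: "3", "3/1", "3//7" or "3/1/7" -> (vidx, tidx, nidx),
    # converted to 0-based, with -1 where a component is absent
    parts = token.split('/')
    vidx = int(parts[0]) - 1
    tidx = int(parts[1]) - 1 if len(parts) > 1 and parts[1] else -1
    nidx = int(parts[2]) - 1 if len(parts) > 2 and parts[2] else -1
    return vidx, tidx, nidx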