Python utils.set_logger() Examples

The following are 3 code examples of utils.set_logger(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utils, or try the search function.
Example #1
Source File: generator.py    From UROP-Adversarial-Feature-Matching-for-Text-Generation with GNU Affero General Public License v3.0 — 6 votes
def __init__(self, timestep, window, batch_size, vocab_size, paramSavePath, logPath, input_dim, hidden_size, keep_prob, L, timestr, debug):
		"""Store the generator's hyper-parameters, wire up logging, and build parameters.

		In debug mode the batch size is clamped to 10 so test runs stay small.
		"""
		self.name = 'g'
		# Architecture / data dimensions.
		self.timestep = timestep
		self.window = window
		self.vocab_size = vocab_size
		self.input_dim = input_dim
		self.hidden_size = hidden_size
		self.keep_prob = keep_prob
		# options['L'] in the author's code; reportedly kept for numerical
		# stability, though the original author gives no further explanation.
		self.L = L
		# Bookkeeping paths and the timestamp used by the logger below.
		self.paramSavePath = paramSavePath
		self.logPath = logPath
		self.timestr = timestr
		# Shrink batches while debugging so runs finish quickly.
		if debug:
			self.batch_size = 10
		else:
			self.batch_size = batch_size
		self.logger = set_logger(self.logPath, self.timestr, os.path.basename(__file__))
		self.init_param()
Example #2
Source File: data.py    From UROP-Adversarial-Feature-Matching-for-Text-Generation with GNU Affero General Public License v3.0 — 5 votes
def __init__(self, dataPath, savePath, paramSavePath, logPath, debug, split_percent, batch_size, timestr, timestep, window):
		"""Load, clean, index, and split the corpus.

		* dataPath locates the data. Two files exist: the full-size corpus
		  described in the paper, and a much smaller 100-sentence sample
		  (arXiv + book data) used for early code tests.
		* debug flags whether this is a code test or a real training run
		  (default True = test mode); in test mode the small sample file and
		  a batch size of 10 are used regardless of the arguments.
		* split_percent is the training : validation : testing ratio.
		"""
		self.debug = debug
		self.savePath = savePath
		# In debug mode ignore the caller's path and read the small sample file.
		self.dataPath = '../data/data_pre.txt' if self.debug else dataPath
		self.paramSavePath = paramSavePath
		self.logger = set_logger(logPath, timestr, os.path.basename(__file__))
		self.split_percent = split_percent
		self.timestep = timestep
		self.window = window
		# Pipeline (order matters): read raw text, normalise it, index the
		# words, then carve out the train/validation/test sets.
		self.load_data()   # fills self.data (file contents) and self.sentSize
		self.clean_str()
		self.word2num()    # builds self.dataArr, self.mapToNum, self.dataNum, self.vocabSize
		self.split_tvt()   # builds self.train / self.validation / self.test
		# (self.shift() re-splits after rotating the first 10% of self.dataNum.)
		self.batch_size = 10 if self.debug else batch_size
Example #3
Source File: discriminator.py    From UROP-Adversarial-Feature-Matching-for-Text-Generation with GNU Affero General Public License v3.0 — 5 votes
def __init__(self, window, vocab_size, paramSavePath, logPath, input_dim, keep_prob, reuse, generator, timestr, debug):
		"""Set up the CNN discriminator: logging, word embeddings, per-window conv filters.

		Args (as used below):
			window: iterable of filter heights; one conv filter is created per entry.
			vocab_size, input_dim: shape of the embedding table [vocab_size, input_dim].
			reuse: if truthy, share the generator's embedding matrix instead of
				creating a new one.
			generator: object whose Wemb is borrowed when `reuse` is set.
			paramSavePath / logPath / timestr: bookkeeping for saving and logging.
			keep_prob: dropout keep probability (stored; not used in this excerpt).
			debug: accepted but not used in the visible part of this method.

		NOTE(review): this constructor appears to continue beyond the visible
		excerpt — the W and b variables created in the loop are never used
		here. Confirm against the full source before modifying.
		"""
		# params = {'lambda_r': 0.001, 'lambda_m': 0.001, 'word_dim': 300}
		self.name = 'd'
		self.window = window
		self.vocab_size = vocab_size
		self.input_dim = input_dim
		self.paramSavePath = paramSavePath
		self.logPath = logPath
		self.timestr = timestr
		#self.cnn_out = tf.get_variable(name=self.name + '_f',
		#							shape=[],
		#							initializer=tf.zeros_initializer())
		self.keep_prob = keep_prob
		self.logger = set_logger(self.logPath, self.timestr, os.path.basename(__file__))
		# Either share the generator's word embedding or create a fresh,
		# uniformly-initialised [vocab_size, input_dim] matrix of our own.
		if reuse:
			self.Wemb = generator.Wemb
		else:
			self.Wemb = tf.get_variable(name=self.name + '_Wemb', shape=[self.vocab_size, self.input_dim],
										dtype=tf.float32, initializer=tf.random_uniform_initializer())
		with tf.variable_scope('d'):
			# One [n, 1, 1, 1] convolution filter W and scalar bias b per window size n.
			for i, n in enumerate(self.window):
				W = tf.get_variable(name=self.name + '_W' + str(i),
									shape=[n, 1, 1, 1],
									initializer=tf.contrib.layers.xavier_initializer())
				b = tf.get_variable(name=self.name + '_b' + str(i),
									shape=[1],
									initializer=tf.zeros_initializer())
				#c = tf.get_variable(name=self.name + '_c' + str(i), # c is each cnn_out
				#					shape=[-1, self.input_dim],
				#					initializer=tf.zeros_initializer())