Python mido.MidiFile() Examples

The following are 16 code examples of mido.MidiFile(), taken from open-source projects. The source file, project, and license for each example are listed above it. You may also want to check out the other available functions and classes of the mido module.
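As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below; 'example.mid' is a placeholder path) of opening a file with mido.MidiFile and iterating over its messages:

import mido

mid = mido.MidiFile('example.mid')
print(mid.type, mid.ticks_per_beat, len(mid.tracks))

# Iterating over a MidiFile yields its messages in playback order,
# with each message's .time converted to seconds.
for msg in mid:
    if msg.type == 'note_on' and msg.velocity > 0:
        print(msg.note, msg.velocity, msg.time)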
Example #1
Source File: midi.py    From tayuya with MIT License
def __init__(self, file_path: str, track=0):
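        # Note: constants, stream and TrackError are imported elsewhere in the source file.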
        self.midi_file = MidiFile(file_path)

        self.track = track
        self.midi_data = self.midi_file.tracks[track]

        # Get time signature
        ts_meta = list(filter(lambda x: x.type == constants.TIME_SIGNATURE,
                              self.midi_data))
        if ts_meta:
            numerator = ts_meta[0].numerator
            denominator = ts_meta[0].denominator
        else:
            numerator = denominator = 4
        self.time_signature = (numerator, denominator)

        self.stream = stream.Stream()

        if not self.midi_data:
            raise TrackError 
Example #2
Source File: midi_functions.py    From JamBot with MIT License
def change_tempo(filename, data_path, target_path):
    mid = mido.MidiFile(data_path + filename)
    new_mid = mido.MidiFile()
    new_mid.ticks_per_beat = mid.ticks_per_beat
    for track in mid.tracks:
        new_track = mido.MidiTrack()
        for msg in track:
            new_msg = msg.copy()
            if new_msg.type == 'set_tempo':
                new_msg.tempo = 500000
            # discretize_time, discritezition, offset_time, offset and myround are
            # module-level settings/helpers defined elsewhere in midi_functions.py
            if discretize_time:
                new_msg.time = myround(msg.time, base=mid.ticks_per_beat / (discritezition / 4))
            if offset_time:
                new_msg.time = int(msg.time + mid.ticks_per_beat / offset)
            new_track.append(new_msg)
        new_mid.tracks.append(new_track)
    new_mid.save(target_path + filename) 
Example #3
Source File: midi_functions.py    From JamBot with MIT License
def change_tempo2(filename, data_path, target_path):
    mid = mido.MidiFile(data_path + filename)
    new_mid = mido.MidiFile()
    new_mid.ticks_per_beat = mid.ticks_per_beat
    for track in mid.tracks:
        new_track = mido.MidiTrack()
        new_mid.tracks.append(new_track)
        for msg in track:
            if msg.type == 'set_tempo':
                print(msg)
                msg.tempo = 500000
                print(msg)
                
            new_track.append(msg)
    new_mid.save(target_path + filename) 
Example #4
Source File: midi_functions.py    From JamBot with MIT License
def get_ticks_per_beat(data_path):
    filenames = os.listdir(data_path)
    for filename in filenames:
        try:
            print( MidiFile(data_path + filename).ticks_per_beat)
        except (ValueError, EOFError, IndexError, OSError, KeyError, ZeroDivisionError) as e:
            print('Unexpected error in ' + filename + ':\n', e, sys.exc_info()[0])
Example #5
Source File: midi.py    From rtmonoaudio2midi with GNU General Public License v3.0
def create_midi_file_with_notes(filename, notes, bpm):
    with MidiFile() as midifile:
        track = MidiTrack()
        midifile.tracks.append(track)

        track.append(Message('program_change', program=12, time=0))

        tempo = int((60.0 / bpm) * 1000000)
        track.append(MetaMessage('set_tempo', tempo=tempo))

        sec_per_tick = tempo / 1000000.0 / midifile.ticks_per_beat
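        # add_notes is a helper defined elsewhere in the same source file.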
        add_notes(track, notes, sec_per_tick)

        midifile.save('{}.mid'.format(filename)) 
Example #6
Source File: visualizer.py    From Piano-LED-Visualizer with MIT License
def start_recording(self):
        self.mid = MidiFile(None, None, 0, 20000)  # type 0 file; 20000 is the ticks_per_beat value
        self.track = MidiTrack()
        self.mid.tracks.append(self.track)                
        self.isrecording = True
        menu.render_message("Recording started", "", 1000)
        self.restart_time()        
        self.messages_to_save = [] 
Example #7
Source File: midi_io.py    From MachineLearning-MusicGeneration with MIT License
def midiToPianoroll(filepath, debug = False):
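	# time_per_time_slice, input_dim, lowest_note, highest_note and MICROSECONDS_PER_MINUTE are module-level constants defined elsewhere in midi_io.py.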
	midi_data = MidiFile(filepath)
	resolution = midi_data.ticks_per_beat
	if debug:
		print ("resolution", resolution)
	set_tempo_events = [x for t in midi_data.tracks for x in t if str(x.type) == 'set_tempo']
	
	tempo = MICROSECONDS_PER_MINUTE/set_tempo_events[0].tempo
	if debug:
		print ("tempo", tempo)
	ticks_per_time_slice = 1.0 * (resolution * tempo * time_per_time_slice)/60 
	if debug:
		print ("ticks_per_time_slice", ticks_per_time_slice)
	
	#Get maximum ticks across all tracks
	total_ticks =0
	for t in midi_data.tracks:
		# Since ticks represent delta times, we need a cumulative sum to get the total ticks in that track
		sum_ticks = 0
		for e in t:
			if str(e.type) == 'note_on' or str(e.type) == 'note_off' or str(e.type) == 'end_of_track':
				sum_ticks += e.time
				
		if sum_ticks > total_ticks:
			total_ticks = sum_ticks
	if debug:
		print ("total_ticks", total_ticks)

	time_slices = int(ceil(total_ticks / ticks_per_time_slice))
	if debug:
		print ("time_slices", time_slices)

	piano_roll = np.zeros((input_dim, time_slices), dtype =int)

	note_states = {}
	for track in midi_data.tracks:
		total_ticks = 0
		for event in track:
			if str(event.type) == 'note_on' and event.velocity > 0:
				total_ticks += event.time
				time_slice_idx = int(total_ticks / ticks_per_time_slice )

				if event.note <= highest_note and event.note >= lowest_note: 
					note_idx = event.note - lowest_note
					piano_roll[note_idx][time_slice_idx] = 1
					note_states[note_idx] = time_slice_idx

			elif str(event.type) == 'note_off' or ( str(event.type) == 'note_on' and event.velocity == 0 ):
				note_idx = event.note - lowest_note
				total_ticks += event.time
				time_slice_idx = int(total_ticks /ticks_per_time_slice )

				if note_idx in note_states:	
					last_time_slice_index = note_states[note_idx]
					piano_roll[note_idx][last_time_slice_index:time_slice_idx] = 1
					del note_states[note_idx]
	return piano_roll.T

#preprocess data directory 
Example #8
Source File: midi_io.py    From MachineLearning-MusicGeneration with MIT License
def pianorollToMidi(piano_roll, filepath):
    #ensure that resolution is an integer 
	ticks_per_time_slice=1 # hard-coded, arbitrary but needs to be >= 1 and an integer to avoid distortion
	tempo = 1/time_per_time_slice
	resolution = 60*ticks_per_time_slice/(tempo*time_per_time_slice)

	mid = MidiFile(ticks_per_beat = int(resolution))
	track = MidiTrack()
	mid.tracks.append(track)
	track.append(MetaMessage('set_tempo', tempo = int(MICROSECONDS_PER_MINUTE/tempo), time =0))

	current_state = np.zeros(input_dim)

	index_of_last_event = 0

	for slice_index, time_slice in enumerate(np.concatenate((piano_roll, np.zeros((1, input_dim))), axis =0)):
		note_changes = time_slice - current_state
		
		for note_idx, note in enumerate(note_changes):
			if note == 1:
				note_event = Message('note_on', time = (slice_index - index_of_last_event)*ticks_per_time_slice, velocity = 65, note = note_idx + lowest_note )
				track.append(note_event)
				index_of_last_event = slice_index
			elif note == -1:
				note_event = Message('note_off', time = (slice_index - index_of_last_event)*ticks_per_time_slice, velocity = 65, note = note_idx + lowest_note )
				track.append(note_event)
				index_of_last_event = slice_index

		current_state = time_slice

	eot = MetaMessage('end_of_track', time=1)
	track.append(eot)
	
	mid.save(filepath) 
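A minimal usage sketch for the two converters above (the file names are placeholders; it assumes the module-level settings in midi_io.py are configured as in the source project):

roll = midiToPianoroll('input.mid', debug=True)  # binary matrix of shape (time_slices, input_dim)
pianorollToMidi(roll, 'reconstructed.mid')       # write the piano roll back out as a MIDI file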
Example #9
Source File: midi.py    From pytorch-music-composer with MIT License
def __init__(self, file):
        self.mid = MidiFile(file)
        self.ppq, self.bpm, self.millis, self.ticks_per_measure, self.multi_time_sig, self.multi_tempo\
            = self.get_info()
        self.duration = self.mid.length 
Example #10
Source File: midi.py    From pytorch-music-composer with MIT License
def samples_to_midi(samples, file, ticks_per_beat=48, thresh=0.5):
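        # samples_per_measure and num_notes are module-level constants defined elsewhere in the source file.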
        mid = MidiFile()
        track = MidiTrack()
        mid.tracks.append(track)

        track.append(Message('program_change', program=4))

        mid.ticks_per_beat = ticks_per_beat
        ticks_per_measure = 4 * ticks_per_beat
        ticks_per_sample = ticks_per_measure / samples_per_measure

        # note_on channel=1 note=44 velocity=127 time=816
        # note_off channel=1 note=44 velocity=64 time=24

        abs_time = 0
        last_time = 0
        for sample in samples:
            for y in range(sample.shape[0]):
                abs_time += ticks_per_sample
                for x in range(sample.shape[1]):
                    note = int(x + (128 - num_notes) / 2)
                    if sample[y, x] >= thresh and (y == 0 or sample[y - 1, x] < thresh):
                        delta_time = abs_time - last_time
                        track.append(Message('note_on', note=note, velocity=int(sample[y,x]*127), time=int(delta_time)))
                        last_time = abs_time
                    elif sample[y, x] < thresh and (y == sample.shape[0] - 1 or sample[y - 1, x] > thresh):
                        delta_time = abs_time - last_time
                        track.append(Message('note_off', note=note, velocity=int(sample[y,x]*127), time=int(delta_time)))
                        last_time = abs_time
        mid.save(file) 
Example #11
Source File: midi.py    From onsets-and-frames with MIT License
def parse_midi(path):
    """open midi file and return np.array of (onset, offset, note, velocity) rows"""
    midi = mido.MidiFile(path)

    time = 0
    sustain = False
    events = []
    for message in midi:
        time += message.time

        if message.type == 'control_change' and message.control == 64 and (message.value >= 64) != sustain:
            # sustain pedal state has just changed
            sustain = message.value >= 64
            event_type = 'sustain_on' if sustain else 'sustain_off'
            event = dict(index=len(events), time=time, type=event_type, note=None, velocity=0)
            events.append(event)

        if 'note' in message.type:
            # MIDI offsets can be either 'note_off' events or 'note_on' with zero velocity
            velocity = message.velocity if message.type == 'note_on' else 0
            event = dict(index=len(events), time=time, type='note', note=message.note, velocity=velocity, sustain=sustain)
            events.append(event)

    notes = []
    for i, onset in enumerate(events):
        if onset['velocity'] == 0:
            continue

        # find the next note_off message
        offset = next(n for n in events[i + 1:] if n['note'] == onset['note'] or n is events[-1])

        if offset['sustain'] and offset is not events[-1]:
            # if the sustain pedal is active at offset, find when the sustain ends
            offset = next(n for n in events[offset['index'] + 1:] if n['type'] == 'sustain_off' or n is events[-1])

        note = (onset['time'], offset['time'], onset['note'], onset['velocity'])
        notes.append(note)

    return np.array(notes) 
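A minimal usage sketch for parse_midi ('performance.mid' is a placeholder path); each row of the returned array is (onset_seconds, offset_seconds, note, velocity):

notes = parse_midi('performance.mid')
for onset, offset, note, velocity in notes[:5]:
    print('note %d: %.3fs -> %.3fs (velocity %d)' % (note, onset, offset, velocity))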
Example #12
Source File: midi.py    From onsets-and-frames with MIT License
def save_midi(path, pitches, intervals, velocities):
    """
    Save extracted notes as a MIDI file

    Parameters
    ----------
    path: the path to save the MIDI file
    pitches: np.ndarray of bin_indices
    intervals: list of (onset_index, offset_index)
    velocities: list of velocity values
    """
    file = MidiFile()
    track = MidiTrack()
    file.tracks.append(track)
    ticks_per_second = file.ticks_per_beat * 2.0

    events = []
    for i in range(len(pitches)):
        events.append(dict(type='on', pitch=pitches[i], time=intervals[i][0], velocity=velocities[i]))
        events.append(dict(type='off', pitch=pitches[i], time=intervals[i][1], velocity=velocities[i]))
    events.sort(key=lambda row: row['time'])

    last_tick = 0
    for event in events:
        current_tick = int(event['time'] * ticks_per_second)
        velocity = int(event['velocity'] * 127)
        if velocity > 127:
            velocity = 127
        pitch = int(round(hz_to_midi(event['pitch'])))
        track.append(Message('note_' + event['type'], note=pitch, velocity=velocity, time=current_tick - last_tick))
        last_tick = current_tick

    file.save(path) 
Example #13
Source File: midi.py    From NoiseMaker with GNU General Public License v3.0
def readfile(file_name):
    mid = mido.MidiFile(file_name)
    return mid.tracks, mid.ticks_per_beat  # the second return value is the number of ticks per beat
Example #14
Source File: midi.py    From NoiseMaker with GNU General Public License v3.0
def multi_pianoroll_to_midi(file_name, bpm, pianoroll_dic):
    # 1. Initialization
    mid = mido.MidiFile()
    tracks = {}  # track data to be written
    first_track = True
    midi_tempo = round(60000000 / bpm)  # tempo of the song (microseconds per beat)
    # 2. Write the notes
    for key in pianoroll_dic:
        # 2.1. Set the track name, instrument, etc.
        tracks[key] = mido.MidiTrack()  # create a new track
        mid.tracks.append(tracks[key])  # add this track to the MIDI file

        if first_track:
            tracks[key].append(mido.MetaMessage('set_tempo', tempo=midi_tempo, time=0))  # set the tempo of the song
            first_track = False
        tracks[key].append(mido.MetaMessage('track_name', name=pianoroll_dic[key]['name'], time=0))  # name of this track
        tracks[key].append(mido.Message('program_change', program=pianoroll_dic[key]['program'], time=0, channel=key))  # instrument used by this track
        # 2.2. Get the note list from pianoroll_dic and convert it to MIDI messages
        note_list = []
        for note_it in pianoroll_dic[key]['note']:
            note_list.append(['on', note_it[0], note_it[1], note_it[2]])
            note_list.append(['off', note_it[0] + note_it[3], note_it[1], note_it[2]])
        note_list = sorted(note_list, key=lambda item: item[1])  # sort the notes by time
        # 2.3. Append these notes to the tracks
        current_note_time = 0
        for note_it in note_list:
            if note_it[0] == 'on':
                tracks[key].append(mido.Message('note_on', note=note_it[2], velocity=note_it[3], time=round(480 * (note_it[1] - current_note_time)), channel=key))
            elif note_it[0] == 'off':
                tracks[key].append(mido.Message('note_off', note=note_it[2], velocity=note_it[3], time=round(480 * (note_it[1] - current_note_time)), channel=key))
            current_note_time = note_it[1]
    # 3. Save the MIDI file
    mid.save(file_name) 
Example #15
Source File: midi_functions.py    From JamBot with MIT License
def get_type(filepath):
    return mido.MidiFile(filepath).type 
Example #16
Source File: visualizer.py    From Piano-LED-Visualizer with MIT License
def play_midi(song_path):
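    # midiports, saving, menu, ledstrip and fastColorWipe come from elsewhere in the source project.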
    midiports.pending_queue.append(mido.Message('note_on'))
    
    if song_path in saving.is_playing_midi.keys():
        menu.render_message(song_path, "Already playing", 2000)
        return
    
    saving.is_playing_midi.clear()
    
    saving.is_playing_midi[song_path] = True
    menu.render_message("Playing: ", song_path, 2000)
    saving.t = threading.currentThread()    

    output_time_last = 0
    delay_debt = 0
    try:   
        mid = mido.MidiFile("Songs/"+song_path)
        fastColorWipe(ledstrip.strip, True)
        #length = mid.length        
        t0 = False        
        for message in mid:
            if song_path in saving.is_playing_midi.keys():
                if t0 is False:
                    t0 = time.time()
                    output_time_start = time.time()            
                output_time_last = time.time() - output_time_start
                delay_temp = message.time - output_time_last
                delay = message.time - output_time_last - float(0.003) + delay_debt
                if delay > 0:
                    time.sleep(delay)
                    delay_debt = 0
                else:
                    delay_debt += delay_temp
                output_time_start = time.time()                   
            
                if not message.is_meta:
                    midiports.playport.send(message)
                    midiports.pending_queue.append(message.copy(time=0))
                
            else:                
                break
        #print('play time: {:.2f} s (expected {:.2f})'.format(
                #time.time() - t0, length))
        #saving.is_playing_midi = False
    except:
        menu.render_message(song_path, "Can't play this file", 2000)