update
This commit is contained in:
parent
b7ed7b1220
commit
8ad6672ed6
8 changed files with 177 additions and 70 deletions
|
@ -13,6 +13,7 @@ from functions_compat import trackfx2fxrack
|
|||
|
||||
from functions_compat import autopl_remove
|
||||
from functions_compat import changestretch
|
||||
from functions_compat import fxrack_moveparams
|
||||
from functions_compat import loops_add
|
||||
from functions_compat import loops_remove
|
||||
from functions_compat import removecut
|
||||
|
@ -21,7 +22,7 @@ from functions_compat import time_seconds
|
|||
from functions_compat import timesigblocks
|
||||
from functions_compat import trackpl_add
|
||||
from functions_compat import unhybrid
|
||||
from functions_compat import fxrack_moveparams
|
||||
from functions_compat import sep_nest_audio
|
||||
|
||||
import json
|
||||
import math
|
||||
|
@ -48,6 +49,7 @@ def set_dawcapabilities(in_dawcapabilities, out_dawcapabilities):
|
|||
|
||||
list__setdc['placement_cut'] = arg__dc['placement_cut'] if 'placement_cut' in arg__dc else False
|
||||
list__setdc['placement_loop'] = arg__dc['placement_loop'] if 'placement_loop' in arg__dc else []
|
||||
list__setdc['placement_audio_nested'] = arg__dc['placement_audio_nested'] if 'placement_audio_nested' in arg__dc else False
|
||||
|
||||
list__setdc['fxrack'] = arg__dc['fxrack'] if 'fxrack' in arg__dc else False
|
||||
list__setdc['fxrack_params'] = arg__dc['fxrack_params'] if 'fxrack_params' in arg__dc else ['vol','enabled']
|
||||
|
@ -56,6 +58,7 @@ def set_dawcapabilities(in_dawcapabilities, out_dawcapabilities):
|
|||
list__setdc['time_seconds'] = arg__dc['time_seconds'] if 'time_seconds' in arg__dc else False
|
||||
list__setdc['placement_audio_stretch'] = arg__dc['placement_audio_stretch'] if 'placement_audio_stretch' in arg__dc else []
|
||||
|
||||
print('[compat] '+str(in__dc['placement_audio_nested']).ljust(5)+' | '+str(out__dc['placement_audio_nested']).ljust(5)+' | placement_audio_nested')
|
||||
print('[compat] '+str(in__dc['placement_cut']).ljust(5)+' | '+str(out__dc['placement_cut']).ljust(5)+' | placement_cut')
|
||||
print('[compat] '+str(in__dc['placement_loop']).ljust(5)+' | '+str(out__dc['placement_loop']).ljust(5)+' | placement_loop')
|
||||
|
||||
|
@ -99,6 +102,7 @@ def makecompat(cvpj_l, cvpj_type):
|
|||
process_part('trackpl_add', trackpl_add, cvpj_proj, cvpj_type, in__dc['track_nopl'], out__dc['track_nopl'])
|
||||
process_part('loops_remove', loops_remove, cvpj_proj, cvpj_type, in__dc['placement_loop'], out__dc['placement_loop'])
|
||||
process_part('removecut', removecut, cvpj_proj, cvpj_type, in__dc['placement_cut'], out__dc['placement_cut'])
|
||||
process_part('sep_nest_audio', sep_nest_audio, cvpj_proj, cvpj_type, in__dc['placement_audio_nested'], out__dc['placement_audio_nested'])
|
||||
process_part('loops_add', loops_add, cvpj_proj, cvpj_type, in__dc['placement_loop'], out__dc['placement_loop'])
|
||||
|
||||
process_part('time_seconds', time_seconds, cvpj_proj, cvpj_type, in__dc['time_seconds'], out__dc['time_seconds'])
|
||||
|
|
48
functions_compat/sep_nest_audio.py
Normal file
48
functions_compat/sep_nest_audio.py
Normal file
|
@ -0,0 +1,48 @@
|
|||
# SPDX-FileCopyrightText: 2023 SatyrDiamond
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from functions import xtramath
|
||||
from functions import placement_data
|
||||
|
||||
def do_placements(cvpj_placements, out__placement_loop):
    """Flatten a list of nested-audio placements into plain audio placements.

    Each input placement carries its child audio events under 'events';
    placement_data.audiotrim trims those events to the parent placement's
    window (honouring an optional 'cut' start offset) and the trimmed
    results are collected into one flat list, which is returned.
    (out__placement_loop is accepted for the compat-pass call signature
    but is not consulted here.)
    """
    flattened = []
    for placement in cvpj_placements:
        position = placement['position']
        duration = placement['duration']
        cut_data = placement['cut'] if 'cut' in placement else None
        # the cut's 'start' shifts the window into the child events
        offset = cut_data['start'] if cut_data is not None else 0
        child_events = placement['events']
        trimmed = placement_data.audiotrim(
            child_events, position - offset, offset, offset + duration)
        flattened.extend(trimmed)
    return flattened
|
||||
|
||||
def process_r(projJ, out__placement_loop):
    """Replace every 'audio_nested' placement list in a type-r project
    with a flattened 'audio' list (via do_placements).

    Handles both laned tracks (laned == 1: each lane's data is converted)
    and plain tracks (the track placement data itself is converted).
    Mutates projJ in place; always returns True.
    """
    if 'track_placements' in projJ:
        for pl_id in projJ['track_placements']:
            pl_data = projJ['track_placements'][pl_id]
            laned = 'laned' in pl_data and pl_data['laned'] == 1
            if laned:
                lane_map = pl_data['lanedata']
                # kept from original: laneorder is read (and required) even
                # though it is not used by this conversion
                _lane_order = pl_data['laneorder']
                for lane_id in lane_map:
                    lane = lane_map[lane_id]
                    if 'audio_nested' in lane:
                        lane['audio'] = do_placements(
                            lane['audio_nested'], out__placement_loop)
                        del lane['audio_nested']
            else:
                if 'audio_nested' in pl_data:
                    pl_data['audio'] = do_placements(
                        pl_data['audio_nested'], out__placement_loop)
                    del pl_data['audio_nested']
    return True
|
||||
|
||||
def process(projJ, cvpj_type, in__placement_loop, out__placement_loop):
    """Compat-pass entry point for separating nested audio placements.

    Only regular project types ('r', 'ri', 'rm') are supported; other
    types are left untouched and False is returned.
    """
    if cvpj_type not in ['r', 'ri', 'rm']:
        return False
    return process_r(projJ, out__placement_loop)
|
|
@ -20,7 +20,7 @@ def process_r(cvpj_l):
|
|||
if track_placement_type == 'notes':
|
||||
new_trackdata[split_cvpj_trackid]['type'] = 'instrument'
|
||||
print('Notes', end=' ')
|
||||
if track_placement_type == 'audio':
|
||||
if track_placement_type in ['audio', 'audio_nested']:
|
||||
new_trackdata[split_cvpj_trackid]['type'] = 'audio'
|
||||
print('Audio', end=' ')
|
||||
new_trackpl[split_cvpj_trackid] = {}
|
||||
|
|
|
@ -174,6 +174,8 @@ def add_track(startpos, midicmds):
|
|||
elif midicmd[0] == 'control_change':
|
||||
if midicmd[2] == 0: t_chan_current_inst[midicmd[1]][1] = midicmd[3]
|
||||
elif midicmd[2] == 111: loop_data[0] = track_curpos/ppq_step
|
||||
elif midicmd[2] == 116: loop_data[0] = track_curpos/ppq_step
|
||||
elif midicmd[2] == 117: loop_data[1] = track_curpos/ppq_step
|
||||
else: add_chautopoint(track_curpos, midicmd[1], midicmd[2], midicmd[3])
|
||||
|
||||
elif midicmd[0] == 'pitchwheel': add_chautopoint(track_curpos, midicmd[1], 'pitch', midicmd[2])
|
||||
|
@ -203,7 +205,10 @@ def add_track(startpos, midicmds):
|
|||
if nameval[0] == 'use_rhythm':
|
||||
auto_chanmode[groups[2]-1][track_curpos] = bool(nameval[1])
|
||||
|
||||
elif midicmd[0] == 'marker': add_point(auto_markers, track_curpos, midicmd[1])
|
||||
elif midicmd[0] == 'marker':
|
||||
add_point(auto_markers, track_curpos, midicmd[1])
|
||||
if midicmd[1] == 'loopStart': loop_data[0] = track_curpos/ppq_step
|
||||
if midicmd[1] == 'loopEnd': loop_data[1] = track_curpos/ppq_step
|
||||
|
||||
elif midicmd[0] == 'text':
|
||||
if ______debugtxt______: print('TEXT', midicmd[1])
|
||||
|
|
|
@ -306,7 +306,8 @@ class input_amped(plugin_input.base):
|
|||
'placement_cut': True,
|
||||
'placement_loop': [],
|
||||
'track_hybrid': True,
|
||||
'placement_audio_stretch': ['rate']
|
||||
'placement_audio_stretch': ['rate'],
|
||||
'placement_audio_nested': True
|
||||
}
|
||||
def parse(self, input_file, extra_param):
|
||||
global cvpj_l
|
||||
|
@ -389,7 +390,9 @@ class input_amped(plugin_input.base):
|
|||
tracks_r.add_pl(cvpj_l, amped_tr_id, 'notes', cvpj_placement_notes)
|
||||
|
||||
if amped_reg_clips != []:
|
||||
temp_pls = []
|
||||
cvpj_placement_audionested = cvpj_placement_base.copy()
|
||||
clip_events = []
|
||||
|
||||
for amped_reg_clip in amped_reg_clips:
|
||||
temp_pl = {}
|
||||
temp_pl['file'] = get_contentGuid(amped_reg_clip['contentGuid'])
|
||||
|
@ -404,12 +407,11 @@ class input_amped(plugin_input.base):
|
|||
amped_reg_clip_offset = amped_reg_clip['offset']*4
|
||||
if amped_reg_clip_offset != 0:
|
||||
temp_pl['cut'] = {'type': 'cut', 'start':amped_reg_clip_offset}
|
||||
temp_pls.append(temp_pl)
|
||||
clip_events.append(temp_pl)
|
||||
|
||||
cvpj_placement_audionested['events'] = clip_events
|
||||
tracks_r.add_pl(cvpj_l, amped_tr_id, 'audio_nested', cvpj_placement_audionested)
|
||||
|
||||
trimpls = placement_data.audiotrim(temp_pls, amped_reg_position-amped_reg_offset,amped_reg_offset, amped_reg_offset+amped_reg_length)
|
||||
|
||||
for temp_pl in trimpls:
|
||||
temp_pl['color'] = amped_colors[amped_reg_color]
|
||||
tracks_r.add_pl(cvpj_l, amped_tr_id, 'audio', temp_pl)
|
||||
|
||||
return json.dumps(cvpj_l)
|
||||
|
|
|
@ -4,30 +4,75 @@
|
|||
from functions import data_bytes
|
||||
from functions import colors
|
||||
from functions import song
|
||||
from functions import note_data
|
||||
from functions import placement_data
|
||||
import plugin_input
|
||||
import json
|
||||
import zipfile
|
||||
import os
|
||||
import xml.etree.ElementTree as ET
|
||||
from functions_tracks import tracks_r
|
||||
|
||||
def getvalue(varx, name, fallbackval):
|
||||
if len(varx.findall(name)) != 0:
|
||||
varxi = varx.findall(name)[0]
|
||||
return varxi.get('value'), varxi.get('name')
|
||||
varvalue = varxi.get('value')
|
||||
varunit = varxi.get('unit')
|
||||
if varxi.get('unit') == 'normalized': varvalue = (float(varvalue)-0.5)*2
|
||||
return varvalue, varxi.get('name')
|
||||
else:
|
||||
return fallbackval
|
||||
return fallbackval, name
|
||||
|
||||
|
||||
|
||||
def parse_notelist(dpx_clip):
    """Build a cvpj notelist from a dawproject XML element containing Notes.

    Times and durations are scaled *4 (dawproject whole-note units to cvpj
    beats) and keys are re-centred on 60. Optional per-note 'release' and
    'channel' attributes are copied through, and any Points automation
    (direct or inside Lanes) is gathered into the note's 'notemod'.
    """
    notes_elem = dpx_clip.findall('Notes')[0]
    out_notelist = []
    for note_elem in notes_elem.findall('Note'):
        out_note = note_data.rx_makenote(
            float(note_elem.get('time')) * 4,
            float(note_elem.get('duration')) * 4,
            int(note_elem.get('key')) - 60,
            float(note_elem.get('vel')),
            None)

        release_attr = note_elem.get('release')
        if release_attr is not None:
            out_note["release"] = float(release_attr)
        channel_attr = note_elem.get('channel')
        if channel_attr is not None:
            out_note["channel"] = int(channel_attr)

        notemod = {}
        points_elems = note_elem.findall('Points')
        if points_elems != []:
            parse_note_points(notemod, points_elems[0])
        lanes_elems = note_elem.findall('Lanes')
        if lanes_elems != []:
            for lane_points in lanes_elems[0].findall('Points'):
                parse_note_points(notemod, lane_points)
        if notemod != {}:
            out_note["notemod"] = notemod

        out_notelist.append(out_note)
    return out_notelist
|
||||
|
||||
def calc_time(dpx_clip, dp_tempo):
    """Convert a dawproject clip's time attributes into cvpj beat units.

    Reads 'time', 'duration', 'playStart' and 'playStop' from the clip and
    scales them according to the clip's 'contentTimeUnit':
    - 'beats':   value * 4 (dawproject whole-note units -> beats)
    - 'seconds': value scaled by the song tempo (relative to 120 BPM);
      note the original uses *8 for 'time' but *16 for the other three —
      presumably intentional, TODO confirm against the dawproject spec.

    Attributes that are absent (None) are passed through unchanged.
    Returns (time, duration, playStart, playStop).
    """
    dpx_p_timetype = dpx_clip.get('contentTimeUnit')

    calctempo = (dp_tempo/120)

    dpx_p_time = dpx_clip.get('time')
    dpx_p_duration = dpx_clip.get('duration')
    dpx_p_playStart = dpx_clip.get('playStart')
    dpx_p_playStop = dpx_clip.get('playStop')

    if dpx_p_timetype == 'beats':
        if dpx_p_time != None: dpx_p_time = float(dpx_p_time)*4
        if dpx_p_duration != None: dpx_p_duration = float(dpx_p_duration)*4
        # bug fix: the original converted playStop twice here and never
        # converted playStart, leaving it as a string in the 'beats' case
        if dpx_p_playStart != None: dpx_p_playStart = float(dpx_p_playStart)*4
        if dpx_p_playStop != None: dpx_p_playStop = float(dpx_p_playStop)*4
    if dpx_p_timetype == 'seconds':
        if dpx_p_time != None: dpx_p_time = (float(dpx_p_time)*8)*calctempo
        if dpx_p_duration != None: dpx_p_duration = (float(dpx_p_duration)*16)*calctempo
        if dpx_p_playStart != None: dpx_p_playStart = (float(dpx_p_playStart)*16)*calctempo
        if dpx_p_playStop != None: dpx_p_playStop = (float(dpx_p_playStop)*16)*calctempo

    return dpx_p_time, dpx_p_duration, dpx_p_playStart, dpx_p_playStop
|
||||
|
||||
|
||||
def dp_parse_trackinfo(dpx_track):
|
||||
global cvpj_l
|
||||
global trackchanid
|
||||
|
||||
track_data = cvpj_l['track_data']
|
||||
track_order = cvpj_l['track_order']
|
||||
track_return = cvpj_l['track_return']
|
||||
track_master = cvpj_l['track_master']
|
||||
|
||||
cvpj_l_track = {}
|
||||
|
||||
dpt_contentType = dpx_track.get('contentType')
|
||||
|
@ -37,6 +82,9 @@ def dp_parse_trackinfo(dpx_track):
|
|||
dpt_role = dpx_chan.get('role')
|
||||
dpt_cid = dpx_chan.get('id')
|
||||
dpt_color = dpx_track.get('color')
|
||||
|
||||
if '#' in dpt_color: dpt_color = colors.hex_to_rgb_float(dpt_color[1:7])
|
||||
|
||||
dpt_name = dpx_track.get('name')
|
||||
track_role = None
|
||||
|
||||
|
@ -92,6 +140,7 @@ class input_dawproject(plugin_input.base):
|
|||
'track_lanes': False,
|
||||
'placement_cut': True,
|
||||
'placement_loop': ['loop', 'loop_off', 'loop_adv'],
|
||||
'placement_audio_nested': True,
|
||||
'auto_nopl': True,
|
||||
'track_nopl': False
|
||||
}
|
||||
|
@ -99,6 +148,7 @@ class input_dawproject(plugin_input.base):
|
|||
def parse(self, input_file, extra_param):
|
||||
global cvpj_l
|
||||
global trackchanid
|
||||
global samplefolder
|
||||
|
||||
zip_data = zipfile.ZipFile(input_file, 'r')
|
||||
|
||||
|
@ -108,12 +158,7 @@ class input_dawproject(plugin_input.base):
|
|||
dp_timesig = [4,4]
|
||||
dp_tempo = 140
|
||||
|
||||
cvpj_l['track_data'] = {}
|
||||
cvpj_l['track_order'] = []
|
||||
cvpj_l['track_return'] = {}
|
||||
cvpj_l['track_placements'] = {}
|
||||
|
||||
cvpj_l['track_master'] = {}
|
||||
samplefolder = extra_param['samplefolder']
|
||||
|
||||
if 'project.xml' in zip_data.namelist():
|
||||
dpx_project = ET.fromstring(zip_data.read('project.xml'))
|
||||
|
@ -161,59 +206,58 @@ class input_dawproject(plugin_input.base):
|
|||
if dpx_trklane.findall('Clips') != []:
|
||||
dpx_clips = dpx_trklane.findall('Clips')[0]
|
||||
for dpx_clip in dpx_clips.findall('Clip'):
|
||||
dpx_p_time = dpx_clip.get('time')
|
||||
dpx_p_duration = dpx_clip.get('duration')
|
||||
dpx_p_playStart = dpx_clip.get('playStart')
|
||||
|
||||
dpx_p_loopStart = dpx_clip.get('loopStart')
|
||||
dpx_p_loopEnd = dpx_clip.get('loopEnd')
|
||||
|
||||
cvpj_pldata = {}
|
||||
cvpj_pldata["position"] = float(dpx_p_time)*4
|
||||
cvpj_pldata["duration"] = float(dpx_p_duration)*4
|
||||
cvpj_pldata["notelist"] = []
|
||||
|
||||
#print(' ----- ', end="")
|
||||
if dpx_p_loopStart == None and dpx_p_loopEnd == None:
|
||||
#print('Cut', dpx_p_playStart, dpx_p_duration)
|
||||
cvpj_pldata["cut"] = {}
|
||||
cvpj_pldata["cut"]['type'] = 'cut'
|
||||
cvpj_pldata["cut"]['start'] = float(dpx_p_playStart)*4
|
||||
cvpj_pldata["cut"]['end'] = (float(dpx_p_duration)+float(dpx_p_playStart))*4
|
||||
elif dpx_p_loopStart != None and dpx_p_loopEnd != None:
|
||||
#print('loop', dpx_p_playStart, dpx_p_duration, dpx_p_loopStart,dpx_p_loopEnd)
|
||||
cvpj_pldata["cut"] = placement_data.cutloopdata(float(dpx_p_playStart)*4, float(dpx_p_loopStart)*4, float(dpx_p_loopEnd)*4)
|
||||
|
||||
if dpx_clip.findall('Notes') != []:
|
||||
dpx_notes = dpx_clip.findall('Notes')[0]
|
||||
dpx_notelist = dpx_notes.findall('Note')
|
||||
for dpx_note in dpx_notelist:
|
||||
cvpj_note = {}
|
||||
cvpj_note["position"] = float(dpx_note.get('time'))*4
|
||||
cvpj_note["duration"] = float(dpx_note.get('duration'))*4
|
||||
cvpj_note["key"] = int(dpx_note.get('key'))-60
|
||||
cvpj_note["vol"] = float(dpx_note.get('vel'))
|
||||
cvpj_note["release"] = float(dpx_note.get('rel'))
|
||||
if dpx_note.get('channel') != None: cvpj_note["channel"] = int(dpx_note.get('channel'))
|
||||
|
||||
cvpj_notemod = {}
|
||||
|
||||
cvpj_auto_points = None
|
||||
if dpx_note.findall('Points') != []:
|
||||
parse_note_points(cvpj_notemod, dpx_note.findall('Points')[0])
|
||||
|
||||
if dpx_note.findall('Lanes') != []:
|
||||
xml_lanes = dpx_note.findall('Lanes')[0]
|
||||
if xml_lanes.findall('Points') != []:
|
||||
for pointxml in xml_lanes.findall('Points'):
|
||||
parse_note_points(cvpj_notemod, pointxml)
|
||||
|
||||
if cvpj_notemod != {}: cvpj_note["notemod"] = cvpj_notemod
|
||||
|
||||
cvpj_pldata["notelist"].append(cvpj_note)
|
||||
dpx_p_time, dpx_p_duration, dpx_p_playStart, dpx_p_playStop = calc_time(dpx_clip, dp_tempo)
|
||||
|
||||
for dpx_clipnotes in dpx_clip.findall('Notes'):
|
||||
cvpj_pldata = placement_data.makepl_n(dpx_p_time, dpx_p_duration, parse_notelist(dpx_clipnotes))
|
||||
if dpx_p_loopStart == None and dpx_p_loopEnd == None: cvpj_pldata["cut"] = {'type': 'cut', 'start': float(dpx_p_playStart)*4}
|
||||
elif dpx_p_loopStart != None and dpx_p_loopEnd != None: cvpj_pldata["cut"] = placement_data.cutloopdata(float(dpx_p_playStart)*4, float(dpx_p_loopStart)*4, float(dpx_p_loopEnd)*4)
|
||||
tracks_r.add_pl(cvpj_l, trackidchan, 'notes', cvpj_pldata)
|
||||
|
||||
for dpx_lanes in dpx_clip.findall('Lanes'):
|
||||
if dpx_lanes.findall('Notes') != []:
|
||||
cvpj_pldata = placement_data.makepl_n(dpx_p_time, dpx_p_duration, parse_notelist(dpx_lanes))
|
||||
if dpx_p_loopStart == None and dpx_p_loopEnd == None: cvpj_pldata["cut"] = {'type': 'cut', 'start': float(dpx_p_playStart)*4}
|
||||
elif dpx_p_loopStart != None and dpx_p_loopEnd != None: cvpj_pldata["cut"] = placement_data.cutloopdata(float(dpx_p_playStart)*4, float(dpx_p_loopStart)*4, float(dpx_p_loopEnd)*4)
|
||||
tracks_r.add_pl(cvpj_l, trackidchan, 'notes', cvpj_pldata)
|
||||
|
||||
for dpx_clipp in dpx_clip.findall('Clips'):
|
||||
|
||||
#print('audio', dpx_p_time,dpx_p_playStop)
|
||||
|
||||
clip_events = []
|
||||
|
||||
pl_data = {}
|
||||
pl_data['position'] = dpx_p_time
|
||||
pl_data['duration'] = dpx_p_playStop
|
||||
|
||||
for dpx_insideclipp in dpx_clipp.findall('Clip'):
|
||||
|
||||
in_time, in_duration, in_playStart, in_playStop = calc_time(dpx_insideclipp, dp_tempo)
|
||||
#print(in_time, in_duration, in_playStart, in_playStop, dpx_insideclipp.findall('Audio'))
|
||||
inside_clip = {}
|
||||
inside_clip['position'] = in_time
|
||||
inside_clip['duration'] = in_duration
|
||||
audio_data = dpx_insideclipp.findall('Audio')
|
||||
if audio_data:
|
||||
audio_file_data = audio_data[0].findall('File')
|
||||
if audio_file_data:
|
||||
audio_file_external = audio_file_data[0].get('external')
|
||||
audio_file_path = audio_file_data[0].get('path')
|
||||
|
||||
if audio_file_path != None:
|
||||
zip_data.extract(audio_file_path, path=samplefolder, pwd=None)
|
||||
inside_clip['file'] = os.path.join(samplefolder,audio_file_path)
|
||||
|
||||
clip_events.append(inside_clip)
|
||||
|
||||
pl_data['events'] = clip_events
|
||||
tracks_r.add_pl(cvpj_l, trackidchan, 'audio_nested', pl_data)
|
||||
|
||||
|
||||
cvpj_l['use_instrack'] = False
|
||||
cvpj_l['use_fxrack'] = False
|
||||
|
|
|
@ -196,7 +196,7 @@ class output_cvpj(plugin_output.base):
|
|||
if 'cut' in s_trkplacement:
|
||||
trkplcut = s_trkplacement['cut']
|
||||
if s_trkplacement['cut']['type'] == 'cut':
|
||||
x_arr_lanes_clip.set('duration', str((trkplcut['end'] - s_trkplacement['cut']['start'])/4))
|
||||
x_arr_lanes_clip.set('duration', str(((s_trkplacement['cut']['start']+s_trkplacement['duration']) - s_trkplacement['cut']['start'])/4))
|
||||
x_arr_lanes_clip.set('playStart', str(trkplcut['start']/4))
|
||||
if s_trkplacement['cut']['type'] == ['loop', 'loop_off', 'loop_adv']:
|
||||
x_arr_lanes_clip.set('duration', str(s_trkplacement['duration']/4))
|
||||
|
@ -207,6 +207,8 @@ class output_cvpj(plugin_output.base):
|
|||
x_arr_lanes_clip.set('time', str(s_trkplacement['position']/4))
|
||||
x_arr_lanes_clip.set('duration', str(s_trkplacement['duration']/4))
|
||||
x_arr_lanes_clip.set('playStart', '0.0')
|
||||
|
||||
|
||||
if 'notelist' in s_trkplacement:
|
||||
s_trknotelist = s_trkplacement['notelist']
|
||||
nlidcount = 1
|
||||
|
|
|
@ -399,6 +399,8 @@ class output_cvpjs(plugin_output.base):
|
|||
FL_playlistitem['patternbase'] = 20480
|
||||
FL_playlistitem['itemindex'] = sampleid
|
||||
FL_playlistitem['length'] = int((CVPJ_Placement['duration']*ppq)/4)
|
||||
FL_playlistitem['startoffset'] = 0
|
||||
FL_playlistitem['endoffset'] = CVPJ_Placement['duration']
|
||||
|
||||
FL_playlistitem['unknown1'] = 120
|
||||
FL_playlistitem['unknown2'] = 25664
|
||||
|
|
Loading…
Reference in a new issue