Compare commits

...

2 Commits

Author        SHA1        Message  Date
SatyrDiamond  c13744a980  update   2023-11-17 20:11:42 -05:00
SatyrDiamond  76f8eda743  update   2023-11-15 19:23:22 -05:00

41 changed files with 1290 additions and 457 deletions

54
data_dset/1bitdragon.dset Normal file
View File

@ -0,0 +1,54 @@
{
"track": {
"colorset": {
"main": [
[
0.14,
1.0,
0.6
],
[
1.0,
0.87,
0.18
],
[
0.76,
0.41,
1.0
],
[
0.97,
0.51,
0.0
],
[
0.76,
0.76,
0.76
],
[
0.76,
0.58,
0.38
],
[
0.47,
0.49,
0.88
],
[
0.59,
0.73,
0.23
],
[
0.88,
0.35,
0.53
]
]
},
"objects": {}
}
}

View File

@ -466,5 +466,62 @@
}
}
}
},
"track": {
"colorset": {
"main": [
[
0.07,
0.64,
0.86
],
[
0.07,
0.84,
0.9
],
[
0.05,
0.71,
0.56
],
[
0.05,
0.69,
0.3
],
[
0.64,
0.94,
0.22
],
[
0.95,
0.79,
0.38
],
[
0.95,
0.49,
0.32
],
[
0.94,
0.25,
0.38
],
[
0.93,
0.2,
0.7
],
[
0.69,
0.06,
0.79
]
]
},
"objects": {}
}
}

View File

@ -1,5 +1,146 @@
{
"drums": {
"colorset": {
"beepbox_dark": [
[
0.44,
0.44,
0.44
],
[
0.6,
0.4,
0.2
],
[
0.29,
0.43,
0.56
],
[
0.48,
0.31,
0.6
],
[
0.38,
0.47,
0.22
]
]
},
"objects": {
"drumset": {
"visual": {
"name": "Drum Set"
}
},
"noise": {
"visual": {
"name": "Basic Noise"
}
}
}
},
"inst": {
"colorset": {
"beepbox_dark": [
[
0,
0.6,
0.63
],
[
0.63,
0.63,
0
],
[
0.78,
0.31,
0
],
[
0,
0.63,
0
],
[
0.82,
0.13,
0.82
],
[
0.47,
0.47,
0.69
],
[
0.54,
0.63,
0
],
[
0.87,
0,
0.1
],
[
0,
0.63,
0.44
],
[
0.57,
0.12,
1
]
]
},
"objects": {
"FM": {
"visual": {
"name": "FM"
}
},
"FM6op": {
"visual": {
"name": "Advanced FM"
}
},
"PWM": {
"visual": {
"name": "Pulse Width"
}
},
"Picked String": {
"visual": {
"name": "Picked String"
}
},
"chip": {
"visual": {
"name": "Chip Wave"
}
},
"custom chip": {
"visual": {
"name": "Custom Chip"
}
},
"harmonics": {
"visual": {
"name": "Harmonics"
}
},
"spectrum": {
"visual": {
"name": "Spectrum"
}
}
}
},
"preset": {
"midi_to": {
"1024": 120,
"1025": 86,

View File

@ -0,0 +1,34 @@
{
"track": {
"colorset": {
"main": [
[
0.83,
0.09,
0.42
],
[
0.91,
0.76,
0.36
],
[
0.22,
0.36,
0.6
],
[
0.44,
0.78,
0.66
],
[
0.64,
0.64,
0.64
]
]
},
"objects": {}
}
}

View File

@ -62,5 +62,52 @@
}
}
}
},
"track": {
"colorset": {
"orgmaker_2": [
[
0.23,
0.3,
0.99
],
[
0.62,
0.11,
0.12
],
[
0.62,
0.16,
0.87
],
[
0.14,
0.45,
0.26
],
[
0.13,
0.46,
0.57
],
[
0.67,
0.5,
0.11
],
[
0.59,
0.64,
0.71
],
[
0.58,
0.53,
0.49
]
]
},
"objects": {}
}
}

59
data_dset/petaporon.dset Normal file
View File

@ -0,0 +1,59 @@
{
"inst": {
"colorset": {
"main": [
[
0.22,
0.52,
0.35
],
[
0.51,
0.88,
0.3
],
[
1.0,
0.95,
0.46
],
[
1.0,
0.75,
0.21
],
[
0.81,
0.47,
0.34
],
[
0.88,
0.25,
0.25
],
[
1.0,
0.5,
0.67
],
[
0.75,
0.25,
0.7
],
[
0.22,
0.6,
1.0
],
[
0.43,
0.93,
1.0
]
]
},
"objects": {}
}
}

View File

@ -0,0 +1,89 @@
{
"inst": {
"colorset": {
"main": [
[
1,
1,
1
],
[
0.31,
0.31,
1
],
[
0.31,
1,
0.31
],
[
0.31,
1,
1
],
[
1,
0.31,
0.31
],
[
1,
0.31,
1
],
[
1,
1,
0.31
],
[
1,
0.65,
0.48
],
[
0.48,
0.65,
1
],
[
0.65,
1,
0.48
],
[
0.48,
1,
0.65
],
[
1,
0.48,
0.65
],
[
0.65,
0.48,
1
],
[
0.4,
1,
0.7
],
[
0.7,
1,
0.4
],
[
1,
0.35,
0.74
]
]
},
"objects": {}
}
}

8
data_dset/piyopiyo.dset Normal file
View File

@ -0,0 +1,8 @@
{
"inst": {
"colorset": {
"main": [[0.25, 0.38, 0.49], [0.36, 0.43, 0.46], [0.51, 0.57, 0.47], [0.58, 0.64, 0.40]]
},
"objects": {}
}
}
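
Each .dset file added in this commit follows the same layout: a top-level category ('track', 'inst' or 'drums') holding a 'colorset' dict that maps set names to lists of [r, g, b] floats in 0..1, plus an 'objects' dict. A quick sketch of reading that layout back, assuming the data_dset/piyopiyo.dset file shown above is present on disk:

import json

# assumes the data_dset/piyopiyo.dset file added above exists at this path
with open('data_dset/piyopiyo.dset', 'r') as f:
    dset = json.load(f)

palette = dset['inst']['colorset']['main']   # list of [r, g, b] floats in 0..1
print(len(palette), palette[0])              # 4 [0.25, 0.38, 0.49]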

54
data_dset/pxtone.dset Normal file
View File

@ -0,0 +1,54 @@
{
"track": {
"colorset": {
"main": [
[
0.94,
0.5,
0.0
],
[
0.41,
0.47,
1.0
],
[
0.79,
0.72,
0.72
],
[
0.68,
0.25,
1.0
],
[
0.57,
0.78,
0.0
],
[
0.99,
0.2,
0.8
],
[
0.0,
0.75,
0.38
],
[
1.0,
0.47,
0.36
],
[
0.0,
0.74,
1.0
]
]
},
"objects": {}
}
}

View File

@ -93,42 +93,42 @@
},
"curve": {
"def": "",
"max": 1,
"min": 0,
"max": null,
"min": null,
"name": "Curve",
"noauto": false,
"noauto": true,
"type": "string"
},
"curve1": {
"def": "",
"max": 1,
"min": 0,
"max": null,
"min": null,
"name": "Curve1",
"noauto": false,
"noauto": true,
"type": "string"
},
"curve2": {
"def": "",
"max": 1,
"min": 0,
"max": null,
"min": null,
"name": "Curve2",
"noauto": false,
"noauto": true,
"type": "string"
},
"curve3": {
"def": "",
"max": 1,
"min": 0,
"max": null,
"min": null,
"name": "Curve3",
"noauto": false,
"noauto": true,
"type": "string"
},
"curve4": {
"def": "",
"max": 1,
"min": 0,
"max": null,
"min": null,
"name": "Curve4",
"noauto": false,
"noauto": true,
"type": "string"
},
"delay_amount": {

View File

@ -30,10 +30,22 @@ def widgit_txt_manip(i_text):
btn_del = imgui.button("Del")
return btn_add, btn_del, i_text
def widgit_color_manip(i_color):
wc_color, wi_color = imgui.color_edit3('##colorin', i_color)
if wc_color: i_color = wi_color
btn_add = False
btn_del = False
if i_color != '':
imgui.same_line()
btn_add = imgui.button("Add")
imgui.same_line()
btn_del = imgui.button("Del")
return btn_add, btn_del, i_color
def widgit_list_manip(i_text, i_list, i_numname, i_vlist):
imgui.separator()
imgui.push_item_width(400)
c_listdata, w_listdata = imgui.list_box('##', i_numname[0], i_list if i_vlist == None else i_vlist)
c_listdata, w_listdata = imgui.list_box('##wlistm', i_numname[0], i_list if i_vlist == None else i_vlist)
if c_listdata:
i_numname[0] = w_listdata
i_numname[1] = i_list[i_numname[0]]
@ -46,11 +58,10 @@ def widgit_txt_but(i_text, i_list, i_numname, i_vlist):
i_text, i_numname, c_listdata, w_listdata = widgit_list_manip(i_text, i_list, i_numname, i_vlist)
return btn_add, btn_del, i_text, i_numname, w_listdata
def widgit_txtint_but(i_text, i_value, i_list, i_numname, i_vlist):
def widgit_color_but(i_text, i_list, i_numname, i_vlist):
btn_add, btn_del, i_text = widgit_txt_manip(i_text)
val_m, i_value = imgui.input_text("##val", i_value)
i_text, i_numname, c_listdata, w_listdata = widgit_list_manip(i_text, i_list, i_numname, i_vlist)
return btn_add, btn_del, i_text, i_value, i_numname, c_listdata, w_listdata, val_m
return btn_add, btn_del, i_text, i_numname, w_listdata
def widgit_dict_txt(dict_data, dict_name, ctrl_txt, ctrl_type):
paramfound = False
@ -113,6 +124,7 @@ g_current_param = [0, None]
g_current_map = [0, None]
g_current_group = [0, None]
g_current_drumset = [0, None]
g_current_colorset = [0, None]
# ####################################################################################################
# ####################################################################################################
@ -504,6 +516,61 @@ def widgits___drumset_editor():
else:
if imgui.button('Create Drumset'): main_dataset.drumset_create(g_current_cat[1], g_current_object[1])
# ####################################################################################################
txtbox_colorset_name = ''
colorbox_colorset_color = [0,0,0]
def window___colorset_list():
window_data = hello_imgui.DockableWindow()
window_data.label = "Colorset List"
window_data.dock_space_name = "VisualEditor"
window_data.gui_function = widgits___colorset_list
return window_data
def widgits___colorset_list():
global g_current_cat
global g_current_colorset
global txtbox_colorset_name
global colorbox_colorset_color
if g_current_cat[1]:
colorsetlist = main_dataset.colorset_list(g_current_cat[1])
if colorsetlist != None:
#colorset = main_dataset.colorset_e_list(g_current_cat[1], g_current_colorset[1])
#c_btn_add, c_btn_del, colorbox_colorset_color = widgit_color_manip(colorbox_colorset_color)
#if colorset != None:
# for color in colorset:
# imgui.color_edit3('##', color)
#imgui.separator()
btn_add, btn_del, txtbox_colorset_name, g_current_colorset, w_listdata = widgit_txt_but(txtbox_colorset_name, colorsetlist, g_current_colorset, None)
if btn_add:
print(txtbox_colorset_name)
main_dataset.colorset_add(g_current_cat[1], txtbox_colorset_name)
else:
if imgui.button('Create Colorset'): main_dataset.colorset_create(g_current_cat[1])
def window___colorset_editor():
window_data = hello_imgui.DockableWindow()
window_data.label = "Colorset Editor"
window_data.dock_space_name = "ExEditorSpace"
window_data.gui_function = widgits___colorset_editor
return window_data
def widgits___colorset_editor():
global g_current_cat
global g_current_colorset
global txtbox_colorset_name
global colorbox_colorset_color
if g_current_cat[1]:
colorsetlist = main_dataset.colorset_list(g_current_cat[1])
if colorsetlist != None:
colorset = main_dataset.colorset_e_list(g_current_cat[1], g_current_colorset[1])
c_btn_add, c_btn_del, colorbox_colorset_color = widgit_color_manip(colorbox_colorset_color)
if c_btn_add: main_dataset.colorset_e_add(g_current_cat[1], g_current_colorset[1], colorbox_colorset_color)
imgui.separator()
if colorset != None:
for color in colorset: imgui.color_edit3('##', color)
# ####################################################################################################
# ####################################################################################################
# --- Param Viewer Window
@ -697,6 +764,8 @@ def create_dockable_windows() -> List[hello_imgui.DockableWindow]:
window___gm_map(),
window___group_list(),
window___drumset_editor(),
window___colorset_list(),
window___colorset_editor(),
window___param_editor(),
window___param_viewer(),
]

View File

@ -47,3 +47,24 @@ def rgb_float_to_hex(rgb_float): return rgb_int_to_hex(rgb_float_to_rgb_int(rgb_
def moregray(rgb_float): return [(rgb_float[0]/2)+0.25,(rgb_float[1]/2)+0.25,(rgb_float[2]/2)+0.25]
def darker(rgb_float, minus):
return [xtramath.clamp(rgb_float[0]-minus, 0, 1),xtramath.clamp(rgb_float[1]-minus, 0, 1),xtramath.clamp(rgb_float[2]-minus, 0, 1)]
class colorset:
def __init__(self, colorset):
self.colorset = colorset
self.colorlen = len(self.colorset) if self.colorset != None else 0
self.num = 0
def getcolor(self):
if self.colorset:
out_color = self.colorset[self.num % self.colorlen]
self.num += 1
return out_color
else:
return None
def getcolornum(self, colornum):
if self.colorset:
out_color = self.colorset[colornum % self.colorlen]
return out_color
else:
return None
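
The colorset class added to functions/colors.py above wraps one of those palettes and hands colors out either in sequence (getcolor) or by index (getcolornum), wrapping around when the palette runs out. A small standalone re-creation of that behaviour; the class and palette here are hypothetical stand-ins, not the repo's own objects:

class colorset_sketch:
    # mirrors the getcolor / getcolornum logic shown above
    def __init__(self, palette):
        self.palette = palette
        self.length = len(palette) if palette is not None else 0
        self.num = 0
    def getcolor(self):
        if not self.palette: return None
        out = self.palette[self.num % self.length]   # wrap around the palette
        self.num += 1
        return out
    def getcolornum(self, colornum):
        if not self.palette: return None
        return self.palette[colornum % self.length]

cs = colorset_sketch([[0.14, 1.0, 0.6], [1.0, 0.87, 0.18], [0.76, 0.41, 1.0]])
print(cs.getcolor())      # [0.14, 1.0, 0.6]
print(cs.getcolor())      # [1.0, 0.87, 0.18]
print(cs.getcolornum(4))  # wraps to index 1: [1.0, 0.87, 0.18]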

View File

@ -165,6 +165,39 @@ class dataset:
if data_values.nested_dict_get_value(self.dataset, [c_name, 'objects', o_name, 'params', p_name]) != None:
self.dataset[c_name]['objects'][o_name]['params'][p_name] = {'noauto': value[0], 'type': value[1], 'def': value[2], 'min': value[3], 'max': value[4], 'name': value[5]}
# ####################################################################################################
# ####################################################################################################
# --- Colorset
# ####################################################################################################
# ####################################################################################################
def colorset_list(self, c_name):
colorset = None
if data_values.nested_dict_get_value(self.dataset, [c_name, 'colorset']) != None:
colorset = [x for x in self.dataset[c_name]['colorset']]
return colorset
def colorset_create(self, c_name):
if c_name in self.dataset: self.dataset[c_name]['colorset'] = {}
def colorset_add(self, c_name, s_name):
if data_values.nested_dict_get_value(self.dataset, [c_name, 'colorset']) != None:
self.dataset[c_name]['colorset'][s_name] = []
def colorset_e_list(self, c_name, s_name):
outval = []
if data_values.nested_dict_get_value(self.dataset, [c_name, 'colorset', s_name]) != None:
outval = [x for x in self.dataset[c_name]['colorset'][s_name]]
return outval
def colorset_e_add(self, c_name, s_name, i_color):
if data_values.nested_dict_get_value(self.dataset, [c_name, 'colorset', s_name]) != None:
self.dataset[c_name]['colorset'][s_name].append(i_color)
def colorset_e_del(self, c_name, s_name, num):
if data_values.nested_dict_get_value(self.dataset, [c_name, 'colorset', s_name]) != None:
del self.dataset[c_name]['colorset'][s_name][num]
# ####################################################################################################
# ####################################################################################################
# --- Midi Map
@ -174,7 +207,6 @@ class dataset:
def midid_to_num(self, i_bank, i_patch, i_isdrum): return i_bank*256 + i_patch + int(i_isdrum)*128
def midid_from_num(self, value): return (value>>8), (value%128), int(bool(value&0b10000000))
def midito_list(self, c_name):
pmap = None
if data_values.nested_dict_get_value(self.dataset, [c_name, 'midi_to']) != None:
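
The colorset_* methods added to the dataset class above all operate on the same nested path, dataset[category]['colorset'][set_name]. A rough sketch of the storage shape they expect, using a plain dict and a hypothetical helper in place of the class and data_values.nested_dict_get_value:

# hypothetical in-memory dataset, matching the .dset JSON layout
dset = {'track': {'colorset': {'main': []}, 'objects': {}}}

def colorset_e_add_sketch(dataset, c_name, s_name, i_color):
    # append a color, but only if dataset[c_name]['colorset'][s_name] already exists
    colorsets = dataset.get(c_name, {}).get('colorset', {})
    if s_name in colorsets:
        colorsets[s_name].append(i_color)

colorset_e_add_sketch(dset, 'track', 'main', [0.94, 0.5, 0.0])
print(list(dset['track']['colorset']))     # ['main']            (what colorset_list returns)
print(dset['track']['colorset']['main'])   # [[0.94, 0.5, 0.0]]  (what colorset_e_list returns)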

View File

@ -41,3 +41,201 @@ def freq_to_note(freq):
def note_to_freq(note):
return (440/32)*(2**((note+63)/12))
class pitchmod2point:
def __init__(self):
self.pitchpoints = []
self.pitch_cur = 0
self.pitch_prev = 0
self.slide_zeropospointexist = False
def pitchmod2point(cvpj_note, position, ptype, maindur, slideparam, input_pitch):
mainslideparam_mul = slideparam*maindur
pitch_exact = False
if ptype == 0:
if slideparam <= maindur:
self.pitch_cur += input_pitch
self.pitchpoints.append({'position': position, 'value': self.pitch_prev})
self.pitchpoints.append({'position': position+slideparam, 'value': self.pitch_cur})
elif slideparam > maindur:
self.pitch_cur = xtramath.between_from_one(self.pitch_prev, self.pitch_prev+input_pitch, maindur/slideparam)
self.pitchpoints.append({'position': position, 'value': self.pitch_prev})
self.pitchpoints.append({'position': position+maindur, 'value': self.pitch_cur})
elif ptype == 1:
input_pitch -= cvpj_note['key']
outdur = maindur
if self.pitch_cur < input_pitch:
self.pitch_cur += (mainslideparam_mul)
pitch_exact = input_pitch < self.pitch_cur
if pitch_exact == True: outdur = (mainslideparam_mul-(self.pitch_cur-input_pitch))/slideparam
elif self.pitch_cur > input_pitch:
self.pitch_cur -= (mainslideparam_mul)
pitch_exact = input_pitch > self.pitch_cur
if pitch_exact == True: outdur = (mainslideparam_mul+(self.pitch_cur-input_pitch))/slideparam
if pitch_exact == True: self.pitch_cur = input_pitch
totalslidedur = outdur/maindur
if totalslidedur > 0.1:
self.pitchpoints.append({'position': position, 'value': self.pitch_prev})
self.pitchpoints.append({'position': position+(totalslidedur), 'value': self.pitch_cur})
else:
self.pitchpoints.append({'position': position, 'value': self.pitch_cur, 'type': 'instant'})
elif ptype == 2:
#print(
# str(position).ljust(4),
# str(maindur).ljust(4),
# str(slideparam).ljust(4),
# str(input_pitch).ljust(4),
# str(position).rjust(4)+'-'+str(position+slideparam).ljust(4),
# str(slideparam/maindur).ljust(4),
# )
if self.slide_zeropospointexist == False:
self.pitchpoints.append({'position': 0, 'value': 0})
self.slide_zeropospointexist = True
if slideparam != 0:
self.pitchpoints.append({'position': position, 'value': self.pitch_cur})
if slideparam > maindur:
self.pitchpoints.append({'position': position+maindur, 'value': input_pitch*(maindur/slideparam)})
self.pitch_cur = input_pitch*(maindur/slideparam)
else:
self.pitchpoints.append({'position': position+slideparam, 'value': input_pitch})
self.pitch_cur = input_pitch
else:
self.pitchpoints.append({'position': position, 'value': input_pitch, 'type': 'instant'})
self.pitch_cur = input_pitch
self.pitch_prev = self.pitch_cur
class notelist:
def __init__(self, ppq, in_notelist):
self.nl = []
self.ppq = ppq
self.used_inst = []
if in_notelist != None:
for cn in in_notelist:
cvpj_note = cn.copy()
t_pos = cvpj_note['position']
t_dur = cvpj_note['duration']
t_key = cvpj_note['key']
t_vol = cvpj_note['vol'] if 'vol' in cvpj_note else None
t_inst = cvpj_note['instrument'] if 'instrument' in cvpj_note else None
del cvpj_note['position']
del cvpj_note['duration']
del cvpj_note['key']
if t_vol != None: del cvpj_note['vol']
if t_inst != None: del cvpj_note['instrument']
t_extra = cvpj_note
self.add_m(t_inst, t_pos, t_dur, t_key, t_vol, t_extra)
def add_r(self, t_pos, t_dur, t_key, t_vol, t_extra):
self.nl.append([t_pos, t_dur, t_key, t_vol, None, t_extra, None, None])
def add_m(self, t_inst, t_pos, t_dur, t_key, t_vol, t_extra):
self.nl.append([t_pos, t_dur, t_key, t_vol, t_inst, t_extra, None, None])
if t_inst != None:
if t_inst not in self.used_inst: self.used_inst.append(t_inst)
def auto_add_last(self, a_type, p_pos, p_val, p_type, p_tension):
if self.nl != []:
if self.nl[-1][6] == None: self.nl[-1][6] = {}
notedata = self.nl[-1]
if a_type not in notedata[6]: notedata[6][a_type] = []
notedata[6][a_type].append([p_pos, p_val, p_type, p_tension])
def get_dur(self):
duration_final = 0
for note in self.nl:
noteendpos = new_n[0]+new_n[1]
if duration_final < noteendpos: duration_final = noteendpos
return duration_final/self.ppq
def edit_move(self, pos):
new_nl = []
for n in self.nl.copy():
new_n[0] += pos
if new_n[0] >= 0: new_nl.append()
self.nl = new_nl
def edit_move_minus(self, pos):
for n in self.nl: n[0] += pos
def edit_trim(self, pos):
new_nl = []
for n in self.nl.copy():
if new_n[0] < pos: new_nl.append()
self.nl = new_nl
def edit_trimmove(self, startat, endat):
if endat != None: self.trim(endat)
if startat != None: self.move(-startat)
def sort(self):
t_nl_bsort = {}
t_nl_sorted = {}
new_nl = []
for n in self.nl:
if new_n[0] not in t_nl_bsort: t_nl_bsort[new_n[0]] = []
t_nl_bsort[new_n[0]].append(n)
t_nl_sorted = dict(sorted(t_nl_bsort.items(), key=lambda item: item[0]))
for p in t_nl_sorted:
for note in t_nl_sorted[p]: new_nl.append(note)
self.nl = new_nl
def to_cvpj(self):
cvpj_out = []
for note in self.nl:
note_data = {}
note_data['position'] = (note[0]/self.ppq)*4
note_data['duration'] = (note[1]/self.ppq)*4
note_data['key'] = note[2]
if note[3] != None: note_data['vol'] = note[3]
if note[4] != None: note_data['instrument'] = note[4]
if note[5] != None:
for key, value in note[5].items(): note_data[key] = value
if note[6] != None:
if 'notemod' not in note_data: note_data['notemod'] = {}
if 'auto' not in note_data['notemod']: note_data['notemod']['auto'] = {}
for key, value in note[6].items():
note_data['notemod']['auto'][key] = []
for autod in value:
pointJ = {}
pointJ['position'] = (autod[0]/self.ppq)*4
pointJ['value'] = autod[1]
pointJ['type'] = autod[2] if autod[2] != None else 'normal'
if autod[3] != None: pointJ['tension'] = autod[3]
note_data['notemod']['auto'][key].append(pointJ)
cvpj_out.append(note_data)
return cvpj_out
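
The notelist class above keeps each note as a row of [position, duration, key, vol, instrument, extra, auto, ...] in the source format's own tick resolution, and only converts to project time in to_cvpj, where positions and durations are scaled by (value / ppq) * 4 so one quarter note becomes four project-time units. A standalone sketch of just that conversion step, with a hypothetical helper name and a PPQ of 48 (the value the LMMS input below passes in):

PPQ = 48  # assumed ticks per quarter note

def ticks_to_cvpj(rows, ppq):
    # rows of [position, duration, key, vol] in ticks -> cvpj-style note dicts
    out = []
    for pos, dur, key, vol in rows:
        out.append({'position': (pos / ppq) * 4,
                    'duration': (dur / ppq) * 4,
                    'key': key,
                    'vol': vol})
    return out

print(ticks_to_cvpj([[48, 24, 0, 1.0]], PPQ))
# [{'position': 4.0, 'duration': 2.0, 'key': 0, 'vol': 1.0}]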

View File

@ -91,14 +91,12 @@ def pitchmod2point(cvpj_note, position, ptype, maindur, slideparam, input_pitch)
if pitch_cur < input_pitch:
pitch_cur += (mainslideparam_mul)
pitch_exact = input_pitch < pitch_cur
if pitch_exact == True:
outdur = (mainslideparam_mul-(pitch_cur-input_pitch))/slideparam
if pitch_exact == True: outdur = (mainslideparam_mul-(pitch_cur-input_pitch))/slideparam
elif pitch_cur > input_pitch:
pitch_cur -= (mainslideparam_mul)
pitch_exact = input_pitch > pitch_cur
if pitch_exact == True:
outdur = (mainslideparam_mul+(pitch_cur-input_pitch))/slideparam
if pitch_exact == True: outdur = (mainslideparam_mul+(pitch_cur-input_pitch))/slideparam
if pitch_exact == True:
pitch_cur = input_pitch

View File

@ -478,6 +478,9 @@ class cvpj_plugin:
data_HP = [False,0,0,0,0]
banddata = self.eqband_get(group)
data_auto = [None, None, [None,None,None,None], None, None]
bandnum = 1
for s_band in banddata:
bandtype = s_band['type']
@ -488,17 +491,27 @@ class cvpj_plugin:
part = [True, band_on, band_freq, band_gain, band_res]
if bandtype == 'low_pass' and band_on: data_LP = part
if bandtype == 'low_shelf' and band_on: data_Lowshelf = part
if bandtype == 'low_pass':
data_LP = part
data_auto[0] = bandnum
if bandtype == 'low_shelf':
data_Lowshelf = part
data_auto[1] = bandnum
if bandtype == 'peak' and band_on:
for peaknum in range(4):
peakdata = data_Peaks[peaknum]
if peakdata[0] == False:
data_Peaks[peaknum] = part
data_auto[2][peaknum] = bandnum
break
if bandtype == 'high_shelf' and band_on: data_HighShelf = part
if bandtype == 'high_pass' and band_on: data_HP = part
return data_LP, data_Lowshelf, data_Peaks, data_HighShelf, data_HP
if bandtype == 'high_shelf':
data_HighShelf = part
data_auto[3] = bandnum
if bandtype == 'high_pass':
data_HP = part
data_auto[4] = bandnum
bandnum += 1
return data_LP, data_Lowshelf, data_Peaks, data_HighShelf, data_HP, data_auto
# -------------------------------------------------- wave
def wave_add(self, i_name, i_wavepoints, i_min, i_max):
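
The EQ hunk above extends the band scan's return value with a data_auto list recording which 1-based band number landed in each slot ([low_pass, low_shelf, four peaks, high_shelf, high_pass]), so automation targets can later be matched to bands. A simplified re-creation of that bookkeeping with a made-up band list; the real loop also tracks per-band on/freq/gain/res values and only fills peak slots for enabled bands, which is omitted here:

# hypothetical band list; the real data comes from self.eqband_get(group)
bands = [{'type': 'high_pass'}, {'type': 'peak'}, {'type': 'peak'}, {'type': 'low_pass'}]

data_auto = [None, None, [None, None, None, None], None, None]
peak_slot = 0
for bandnum, s_band in enumerate(bands, start=1):   # bandnum starts at 1, as above
    bandtype = s_band['type']
    if bandtype == 'low_pass':   data_auto[0] = bandnum
    if bandtype == 'low_shelf':  data_auto[1] = bandnum
    if bandtype == 'peak' and peak_slot < 4:
        data_auto[2][peak_slot] = bandnum
        peak_slot += 1
    if bandtype == 'high_shelf': data_auto[3] = bandnum
    if bandtype == 'high_pass':  data_auto[4] = bandnum

print(data_auto)   # [4, None, [2, 3, None, None], None, 1]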

View File

@ -375,4 +375,27 @@ def multi_convert(cvpj_l, i_rows, i_patterns, i_orders, i_chantype, i_len_table)
curpos += i_len_table[curpatnum]
curpatnum += 1
#print(multi_used_instruments)
#print(multi_used_instruments)
class patterndata:
def __init__(self, number_of_channels):
self.patterndata = {}
self.num_chans = number_of_channels
def pattern_add(self, num, rows):
s_patdata = []
for _ in range(rows):
s_patdata.append([{}, [[None, None, {}] for _ in range(self.num_chans)]])
self.patterndata[num] = s_patdata
def cell_data(self, n_pat, n_row, n_chan, c_note, c_inst, c_partype, c_parval):
if n_pat in self.patterndata:
if n_row < len(self.patterndata[n_pat]):
if n_chan < self.num_chans:
patdata = self.patterndata[n_pat][n_row][1][n_chan]
if c_note != None: patdata[0] = c_note
if c_inst != None: patdata[1] = c_note
if c_partype != None: patdata[2][c_partype] = c_parval
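
The patterndata class above stores each pattern as one entry per row, where a row is [global-parameters dict, one [note, instrument, {effect: value}] cell per channel], and cell_data writes into a single cell. A small sketch of that layout, built directly rather than through the class:

# hypothetical 4-channel, 8-row pattern, matching what pattern_add builds
num_chans, rows = 4, 8
pattern = [[{}, [[None, None, {}] for _ in range(num_chans)]] for _ in range(rows)]

# write note 60, instrument 2 and one parameter into row 0, channel 1
cell = pattern[0][1][1]
cell[0] = 60              # note
cell[1] = 2               # instrument
cell[2]['vol'] = 0.5      # effect/parameter type -> value

print(pattern[0][1][1])   # [60, 2, {'vol': 0.5}]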

View File

@ -52,8 +52,9 @@ def process_r(cvpj_l):
if output_id[2] in cvpj_l['track_placements']:
track_placements = cvpj_l['track_placements'][output_id[2]]
if 'audio_nested' in track_placements:
for spld in track_placements['audio_nested']:
spld['fxrack_channel'] = output_id[0]+1
for spld in track_placements['audio_nested']: spld['fxrack_channel'] = output_id[0]+1
if 'audio' in track_placements:
for spld in track_placements['audio']: spld['fxrack_channel'] = output_id[0]+1
for output_id in output_ids:

View File

@ -87,8 +87,7 @@ class input_mariopaint_msq(plugin_input.base):
tracks_m.playlist_add(cvpj_l, 1)
tracks_m.add_pl(cvpj_l, 1, 'notes', placement_data.nl2pl(cvpj_notelist))
for instname in instnames:
tracks_m.import_dset(cvpj_l, instname, instname, dataset, dataset_midi, None, None)
for instname in instnames: tracks_m.import_dset(cvpj_l, instname, instname, dataset, dataset_midi, None, None)
cvpj_l['do_addloop'] = True
cvpj_l['do_singlenotelistcut'] = True

View File

@ -82,7 +82,7 @@ class input_mariopaint_smp(plugin_input.base):
tracks_m.add_pl(cvpj_l, 1, 'notes', placement_data.nl2pl(cvpj_notelist))
for instname in instnames:
tracks_m.import_dset(cvpj_l, instname, instname, dataset, dataset_midi, None, None)
tracks_m.import_dset(cvpj_l, instname, smpnames[instname], dataset, dataset_midi, None, None)
cvpj_l['do_addloop'] = True
cvpj_l['do_singlenotelistcut'] = True

View File

@ -84,6 +84,8 @@ class input_ceol(plugin_input.base):
tracks_master.create(cvpj_l, 1)
tracks_master.visual(cvpj_l, name='Master', color=[0.31373, 0.39608, 0.41569])
masterfx_plugindata = None
if ceol_basic_effect == 0: #delay
masterfx_plugindata = plugins.cvpj_plugin('deftype', 'universal', 'delay-c')
masterfx_plugindata.fxdata_add(1, 0.5)
@ -122,7 +124,7 @@ class input_ceol(plugin_input.base):
masterfx_plugindata = plugins.cvpj_plugin('deftype', 'universal', 'eq-bands')
masterfx_plugindata.eqband_add(1, calc_cutoff(ceol_basic_effectvalue), 0, 'high_pass', 1, None)
masterfx_plugindata.to_cvpj(cvpj_l, 'master-effect')
if masterfx_plugindata: masterfx_plugindata.to_cvpj(cvpj_l, 'master-effect')
fxslot.insert(cvpj_l, ['master'], 'audio', 'master-effect')

View File

@ -16,7 +16,6 @@ import struct
import json
def fxget(fxtype, fxparam, output_param, output_extra):
song_tracker_fx_mod.do_fx(None, output_extra, output_param, fxtype, fxparam)
if fxtype == 7:

View File

@ -10,6 +10,7 @@ from functions import data_values
from functions import placement_data
from functions import placements
from functions import xtramath
from functions import colors
from functions import song
from functions_tracks import auto_data
from functions_tracks import fxrack
@ -20,15 +21,6 @@ import plugin_input
import json
import math
global colornum_p
global colornum_d
colornum_p = 0
colornum_d = 0
colors_pitch = [[0, 0.6, 0.63], [0.63, 0.63, 0], [0.78, 0.31, 0], [0, 0.63, 0], [0.82, 0.13, 0.82], [0.47, 0.47, 0.69], [0.54, 0.63, 0], [0.87, 0, 0.1], [0, 0.63, 0.44], [0.57, 0.12, 1]]
colors_drums = [ [0.44, 0.44, 0.44], [0.6, 0.4, 0.2], [0.29, 0.43, 0.56], [0.48, 0.31, 0.6], [0.38, 0.47, 0.22]]
filtervals = [2378.41, 3363.59, 4756.83, 5656.85, 8000, 9513.66, 11313.71, 13454.34, 16000, 19027.31, None]
rawChipWaves = {}
@ -133,33 +125,6 @@ noteoffset['D'] = 2
noteoffset['C♯'] = 1
noteoffset['C'] = 0
inst_names = {
"FM6op": "advanced FM",
"chip": "Chip Wave",
"PWM": "Pulse Width",
"harmonics": "Harmonics",
"Picked String": "Picked String",
"spectrum": "Spectrum",
"FM": "FM",
"custom chip": "Custom Chip",
"noise": "Basic Noise",
"drumset": "Drum Set"
}
def getcolor_p():
global colornum_p
out_color = colors_pitch[colornum_p]
colornum_p += 1
if colornum_p == 10: colornum_p = 0
return out_color
def getcolor_d():
global colornum_d
out_color = colors_drums[colornum_d]
colornum_d += 1
if colornum_d == 5: colornum_d = 0
return out_color
def calcval(value):
global jummbox_beatsPerBar
global jummbox_ticksPerBeat
@ -204,7 +169,7 @@ def parse_instrument(channum, instnum, bb_instrument, bb_type, bb_color, bb_inst
a_decay = 3
a_sustain = 1
m_bank, m_inst, m_drum = dataset.midito_get('inst', bb_preset)
m_bank, m_inst, m_drum = dataset.midito_get('preset', bb_preset)
if m_inst != None:
tracks_mi.import_dset(cvpj_l, cvpj_instid, bb_preset, dataset, dataset_midi, None, bb_color)
@ -214,7 +179,8 @@ def parse_instrument(channum, instnum, bb_instrument, bb_type, bb_color, bb_inst
if 'unison' in bb_instrument: inst_plugindata.dataval_add('unison', bb_instrument['unison'])
cvpj_instname = inst_names[bb_inst_type]
if bb_type == 'pitch': cvpj_instname, _ = dataset.object_get_name_color('inst', bb_inst_type)
if bb_type == 'drum': cvpj_instname, _ = dataset.object_get_name_color('drums', bb_inst_type)
if bb_inst_type == 'chip':
bb_inst_wave = bb_instrument['wave']
@ -430,8 +396,8 @@ def parse_channel(channeldata, channum, durpos):
bb_sequence = channeldata['sequence']
if bb_type == 'pitch' or bb_type == 'drum':
if bb_type == 'pitch': bb_color = getcolor_p()
if bb_type == 'drum': bb_color = getcolor_d()
if bb_type == 'pitch': bb_color = colors_pitch.getcolor()
if bb_type == 'drum': bb_color = colors_drums.getcolor()
t_instnum = 1
for bb_instrument in bb_instruments:
@ -556,7 +522,7 @@ def parse_channel(channeldata, channum, durpos):
cvpj_autopl = auto.multiply([cvpj_autodata], 0, 0.04)
auto_data.add_pl(cvpj_l, 'float', ['track', auto_cvpj_instid, 'vol'], cvpj_autopl[0])
placement_pos += bb_partdur
placement_pos += bb_partdur
def get_durpos(jummbox_channels):
global jummbox_notesize
@ -618,6 +584,9 @@ class input_jummbox(plugin_input.base):
global dataset
global dataset_midi
global colors_pitch
global colors_drums
global jummbox_beatsPerBar
global jummbox_ticksPerBeat
global jummbox_key
@ -630,6 +599,9 @@ class input_jummbox(plugin_input.base):
dataset = data_dataset.dataset('./data_dset/beepbox.dset')
dataset_midi = data_dataset.dataset('./data_dset/midi.dset')
colors_pitch = colors.colorset(dataset.colorset_e_list('inst', 'beepbox_dark'))
colors_drums = colors.colorset(dataset.colorset_e_list('drums', 'beepbox_dark'))
bbcvpj_placementsize = []
bbcvpj_placementnames = {}

View File

@ -7,14 +7,14 @@ from functions import note_data
from functions import placement_data
from functions import plugins
from functions import song
from functions import colors
from functions import data_dataset
from functions_tracks import tracks_mi
import plugin_input
import json
import struct
import os
pixi_colors = [[1, 1, 1],[0.31, 0.31, 1],[0.31, 1, 0.31],[0.31, 1, 1],[1, 0.31, 0.31],[1, 0.31, 1],[1, 1, 0.31],[1, 0.65, 0.48],[0.48, 0.65, 1],[0.65, 1, 0.48],[0.48, 1, 0.65],[1, 0.48, 0.65],[0.65, 0.48, 1],[0.40, 1, 0.7],[0.70, 1, 0.4],[1, 0.35, 0.74]]
class input_cvpj_f(plugin_input.base):
def __init__(self): pass
def is_dawvert_plugin(self): return 'input'
@ -43,6 +43,9 @@ class input_cvpj_f(plugin_input.base):
samplefolder = extra_param['samplefolder']
dataset = data_dataset.dataset('./data_dset/pixitracker.dset')
colordata = colors.colorset(dataset.colorset_e_list('inst', 'main'))
for _ in range(16): pixi_data_sounds.append([None,None,None,None,None,None,None,None])
for pixi_chunk in pixi_chunks:
@ -142,7 +145,7 @@ class input_cvpj_f(plugin_input.base):
pluginid = plugins.get_id()
tracks_mi.inst_create(cvpj_l, cvpj_instid)
tracks_mi.inst_visual(cvpj_l, cvpj_instid, name='Inst #'+str(instnum+1), color=pixi_colors[instnum])
tracks_mi.inst_visual(cvpj_l, cvpj_instid, name='Inst #'+str(instnum+1), color=colordata.getcolornum(instnum))
if pixi_data_sounds[instnum] != [None,None,None,None,None,None,None,None]:
t_sounddata = pixi_data_sounds[instnum]

View File

@ -409,17 +409,17 @@ class input_it(plugin_input.base):
for itpd in envvardata['points']:
if envtype == 'vol':
inst_plugindata.env_points_add('vol', itpd['pos']/48, itpd['value']/64)
if susenabled == 1: plugins.add_env_point_var(cvpj_l, pluginid, 'vol', 'sustain', envvardata['susloop_start']+1)
if susenabled == 1: inst_plugindata.env_points_addvar('vol', 'sustain', envvardata['susloop_start']+1)
if envtype == 'pan':
inst_plugindata.env_points_add('pan', itpd['pos']/48, (itpd['value'])/32)
if susenabled == 1: plugins.add_env_point_var(cvpj_l, pluginid, 'pan', 'sustain', envvardata['susloop_start']+1)
if susenabled == 1: inst_plugindata.env_points_addvar('pan', 'sustain', envvardata['susloop_start']+1)
if envtype == 'pitch':
if envvardata['usepitch'] != 1:
inst_plugindata.env_points_add('pitch', itpd['pos']/48, (itpd['value']))
if susenabled == 1: plugins.add_env_point_var(cvpj_l, pluginid, 'pitch', 'sustain', envvardata['susloop_start']+1)
if susenabled == 1: inst_plugindata.env_points_addvar('pitch', 'sustain', envvardata['susloop_start']+1)
else:
inst_plugindata.env_points_add('cutoff', itpd['pos']/48, (itpd['value']/64))
if susenabled == 1: plugins.add_env_point_var(cvpj_l, pluginid, 'cutoff', 'sustain', envvardata['susloop_start']+1)
if susenabled == 1: inst_plugindata.env_points_addvar('cutoff', 'sustain', envvardata['susloop_start']+1)
filterenv_used = True
if it_singleinst['fadeout'] != 0:

View File

@ -5,6 +5,7 @@ from functions import note_data
from functions import placement_data
from functions import plugins
from functions import song
from functions import colors
from functions import data_dataset
from functions_tracks import fxslot
from functions_tracks import trackfx
@ -15,19 +16,6 @@ import plugin_input
import xml.etree.ElementTree as ET
import zipfile
as_pattern_color = {
0: [0.07, 0.64, 0.86],
1: [0.07, 0.84, 0.90],
2: [0.05, 0.71, 0.56],
3: [0.05, 0.69, 0.30],
4: [0.64, 0.94, 0.22],
5: [0.95, 0.79, 0.38],
6: [0.95, 0.49, 0.32],
7: [0.94, 0.25, 0.38],
8: [0.93, 0.20, 0.70],
9: [0.69, 0.06, 0.79],
}
def getvalue(xmltag, xmlname, fallbackval):
if xmltag.findall(xmlname) != []: return xmltag.findall(xmlname)[0].text.strip()
else: return fallbackval
@ -109,7 +97,6 @@ def make_fxslot(x_device_sound, fx_type, as_device):
fx_plugindata.fxvisual_add('Amp', None)
fx_plugindata.to_cvpj(cvpj_l, pluginid)
return pluginid
class input_audiosanua(plugin_input.base):
@ -134,6 +121,7 @@ class input_audiosanua(plugin_input.base):
cvpj_l = {}
dataset = data_dataset.dataset('./data_dset/audiosauna.dset')
colordata = colors.colorset(dataset.colorset_e_list('track', 'main'))
songdataxml_filename = None
@ -196,7 +184,7 @@ class input_audiosanua(plugin_input.base):
cvpj_tr_mute = getbool(x_chan.get('mute'))
cvpj_tr_solo = getbool(x_chan.get('solo'))
cvpj_tr_color = as_pattern_color[as_channum]
cvpj_tr_color = colordata.getcolornum(as_channum)
tracks_r.track_create(cvpj_l, cvpj_id, 'instrument')
tracks_r.track_visual(cvpj_l, cvpj_id, name=cvpj_tr_name, color=cvpj_tr_color)
@ -219,7 +207,7 @@ class input_audiosanua(plugin_input.base):
cvpj_pldata = placement_data.makepl_n(as_pattern_startTick/32, (as_pattern_endTick-as_pattern_startTick)/32, [])
cvpj_pldata['cut'] = {'type': 'cut', 'start': 0, 'end': as_pattern_patternLength/32}
cvpj_pldata['color'] = as_pattern_color[as_pattern_patternColor]
cvpj_pldata['color'] = colordata.getcolornum(as_pattern_patternColor)
if as_pattern_patternId in as_patt_notes:
t_notelist = as_patt_notes[as_pattern_patternId]

View File

@ -322,6 +322,7 @@ def lmms_decodeplugin(trkX_insttr):
wave_data = struct.unpack('f'*sampleshape_size, sampleshape)
inst_plugindata.wave_add('main', wave_data, -1, 1)
inst_plugindata.osc_num_oscs(1)
inst_plugindata.osc_opparam_set(0, 'shape', 'custom_wave')
inst_plugindata.osc_opparam_set(0, 'wave_name', 'main')
if pluginname == "sid":
@ -374,30 +375,26 @@ def lmms_decodeplugin(trkX_insttr):
# ------- Notelist -------
def lmms_decode_nlpattern(notesX):
notelist = []
printcountpat = 0
cvpj_notelist = note_data.notelist(48, None)
for noteX in notesX:
noteJ = note_data.rx_makenote(float(noteX.get('pos'))/12, float(noteX.get('len'))/12, int(noteX.get('key'))-60, hundredto1(noteX.get('vol')), hundredto1(noteX.get('pan')))
cvpj_notelist.add_r(int(noteX.get('pos')), int(noteX.get('len')), int(noteX.get('key'))-60, hundredto1(noteX.get('vol')), {'pan': hundredto1(noteX.get('pan'))})
noteX_auto = noteX.findall('automationpattern')
if len(noteX_auto) != 0:
noteJ['notemod'] = {}
noteJ['notemod']['auto'] = {}
noteJ['notemod']['auto']['pitch'] = []
noteX_auto = noteX.findall('automationpattern')[0]
if len(noteX_auto.findall('detuning')) != 0:
noteX_detuning = noteX_auto.findall('detuning')[0]
if len(noteX_detuning.findall('time')) != 0:
prognum = int(noteX_detuning.get('prog'))
for pointX in noteX_detuning.findall('time'):
pointJ = {}
pointJ['position'] = float(pointX.get('pos')) / 12
pointJ['value'] = float(pointX.get('value'))
pointJ['type'] = 'instant' if prognum == 0 else 'normal'
noteJ['notemod']['auto']['pitch'].append(pointJ)
cvpj_notelist.auto_add_last('pitch',
int(pointX.get('pos')), float(pointX.get('value')),
'instant' if prognum == 0 else 'normal', None)
printcountpat += 1
notelist.append(noteJ)
print('['+str(printcountpat), end='] ')
return notelist
return cvpj_notelist.to_cvpj()
def lmms_decode_nlplacements(trkX):
nlplacements = []
patsX = trkX.findall('pattern')
@ -432,7 +429,10 @@ def lmms_decode_inst_track(trkX, trackid):
#trkX_insttr
trkX_insttr = trkX.findall('instrumenttrack')[0]
track_color, pluginname, instpluginid = lmms_decodeplugin(trkX_insttr)
plug_color, pluginname, instpluginid = lmms_decodeplugin(trkX_insttr)
track_color = trkX.get('color')
if track_color == None: track_color = plug_color
else: track_color = track_color = colors.hex_to_rgb_float(track_color)
add_window_data(trkX, cvpj_l, 'plugin', instpluginid)
cvpj_pan = float(lmms_auto_getvalue(trkX_insttr, 'pan', 0, 'float', [0, 0.01], ['track', trackid, 'pan']))
cvpj_vol = float(lmms_auto_getvalue(trkX_insttr, 'vol', 100, 'float', [0, 0.01], ['track', trackid, 'vol']))
@ -876,8 +876,9 @@ class input_lmms(plugin_input.base):
if len(trkscX): add_window_data(trkscX[0], cvpj_l, 'main', 'tracklist')
trksX = trkscX[0].findall('track')
fxmixerX = songX.findall('fxmixer')
if len(fxmixerX): add_window_data(fxmixerX[0], cvpj_l, 'main', 'fxmixer')
fxX = fxmixerX[0].findall('fxchannel')
if len(fxmixerX):
add_window_data(fxmixerX[0], cvpj_l, 'main', 'fxmixer')
fxX = fxmixerX[0].findall('fxchannel')
tlX = songX.find('timeline')
@ -896,7 +897,7 @@ class input_lmms(plugin_input.base):
projnotesX = songX.findall('projectnotes')
lmms_decode_tracks(trksX)
lmms_decode_fxmixer(cvpj_l, fxX)
if len(fxmixerX): lmms_decode_fxmixer(cvpj_l, fxX)
trackdata = cvpj_l['track_data'] if 'track_data' in cvpj_l else {}

View File

@ -4,52 +4,38 @@
from functions import placement_data
from functions import song
from functions import note_data
from functions import colors
from functions import data_dataset
from functions_tracks import tracks_r
import plugin_input
import json
l_org_colors = [[0.23, 0.30, 0.99],
[0.62, 0.11, 0.12],
[0.62, 0.16, 0.87],
[0.14, 0.45, 0.26],
[0.13, 0.46, 0.57],
[0.67, 0.50, 0.11],
[0.59, 0.64, 0.71],
[0.58, 0.53, 0.49],
[0.23, 0.30, 0.99],
[0.62, 0.11, 0.12],
[0.62, 0.16, 0.87],
[0.14, 0.45, 0.26],
[0.13, 0.46, 0.57],
[0.67, 0.50, 0.11],
[0.59, 0.64, 0.71],
[0.58, 0.53, 0.49]
]
cur_val = 0
def org_stream(bio_org, org_numofnotes, maxchange, org_notelist, tnum):
global cur_val
for x in range(org_numofnotes):
pre_val = bio_org.read(1)[0]
if maxchange != None:
if 0 <= pre_val <= maxchange: cur_val = pre_val
org_notelist[x][tnum] = cur_val
else:
org_notelist[x][tnum] = pre_val
def read_orgtrack(bio_org, instrumentinfotable_input, trackid):
global cur_note
global cur_vol
global cur_pan
global cur_val
org_numofnotes = instrumentinfotable_input[trackid][3]
org_notelist = []
for x in range(org_numofnotes): org_notelist.append([0,0,0,0,0])
for x in range(org_numofnotes): #position
org_notelist[x][0] = int.from_bytes(bio_org.read(4), "little")
for x in range(org_numofnotes): #note
pre_note = bio_org.read(1)[0]
if 0 <= pre_note <= 95: cur_note = pre_note
org_notelist[x][1] = cur_note
for x in range(org_numofnotes): #duration
org_notelist[x][2] = bio_org.read(1)[0]
for x in range(org_numofnotes): #vol
pre_vol = bio_org.read(1)[0]
if 0 <= pre_vol <= 254: cur_vol = pre_vol
org_notelist[x][3] = cur_vol
for x in range(org_numofnotes): #pan
pre_pan = bio_org.read(1)[0]
if 0 <= pre_pan <= 12: cur_pan = pre_pan
org_notelist[x][4] = cur_pan
org_stream(bio_org, org_numofnotes, 95, org_notelist, 1) #note
org_stream(bio_org, org_numofnotes, 256, org_notelist, 2) #duration
org_stream(bio_org, org_numofnotes, 254, org_notelist, 3) #vol
org_stream(bio_org, org_numofnotes, 12, org_notelist, 4) #pan
org_l_nl = {}
for org_note in org_notelist: org_l_nl[org_note[0]] = org_note[1:5]
org_l_nl = dict(sorted(org_l_nl.items(), key=lambda item: item[0]))
@ -63,9 +49,7 @@ def read_orgtrack(bio_org, instrumentinfotable_input, trackid):
if notedata[1] != 1:
notedur = notedata[1]
endnote = org_l_n+notedur
if endnote != None:
if endnote-org_l_n == notedur: isinsidenote = False
else: isinsidenote = True
if endnote != None: isinsidenote = False if endnote-org_l_n == notedur else True
else: isinsidenote = False
if isinsidenote == False: cvpj_nl.append(note_data.rx_makenote(org_l_n, notedata[1], notedata[0]-48, notedata[2]/254, (notedata[3]-6)/6))
return cvpj_nl
@ -92,6 +76,7 @@ class input_orgyana(plugin_input.base):
cvpj_l = {}
dataset = data_dataset.dataset('./data_dset/orgyana.dset')
colordata = colors.colorset(dataset.colorset_e_list('track', 'orgmaker_2'))
bio_org = open(input_file, 'rb')
org_type = bio_org.read(6)
@ -128,7 +113,7 @@ class input_orgyana(plugin_input.base):
idval = 'org_'+str(tracknum)
tracks_r.track_create(cvpj_l, idval, 'instrument')
tracks_r.track_visual(cvpj_l, idval, name=trackname, color=l_org_colors[tracknum])
tracks_r.track_visual(cvpj_l, idval, name=trackname, color=colordata.getcolornum(tracknum))
tracks_r.track_param_add(cvpj_l, idval, 'pitch', (org_pitch-1000)/1800, 'float')
tracks_r.add_pl(cvpj_l, idval, 'notes', placement_data.nl2pl(s_cvpj_nl))
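
In the orgyana input above, the four copy-pasted read loops for note, duration, volume and pan are collapsed into a single org_stream helper: it reads one byte per note and, when a maximum is given, keeps the last in-range value whenever a byte falls outside 0..maxchange. A standalone sketch of that behaviour over an in-memory byte string (helper name is hypothetical):

import io

def org_stream_sketch(bio, count, maxchange):
    out, cur = [], 0
    for _ in range(count):
        val = bio.read(1)[0]
        if maxchange is None or 0 <= val <= maxchange:
            cur = val                 # in range: take the new value
        out.append(cur)               # out of range: repeat the previous one
    return out

print(org_stream_sketch(io.BytesIO(bytes([3, 200, 7])), 3, 95))   # [3, 3, 7]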

View File

@ -6,24 +6,14 @@ from functions import note_data
from functions import placement_data
from functions import plugins
from functions import song
from functions import colors
from functions import data_dataset
from functions_tracks import tracks_r
import plugin_input
import io
import struct
import json
peta_colors = [
[0.22, 0.52, 0.35],
[0.51, 0.88, 0.30],
[1.00, 0.95, 0.46],
[1.00, 0.75, 0.21],
[0.81, 0.47, 0.34],
[0.88, 0.25, 0.25],
[1.00, 0.50, 0.67],
[0.75, 0.25, 0.70],
[0.22, 0.60, 1.00],
[0.43, 0.93, 1.00]]
def getval(i_val):
if i_val == 91: i_val = 11
elif i_val == 11: i_val = 91
@ -57,31 +47,35 @@ class input_petaporon(plugin_input.base):
cvpj_notelists = {}
dataset = data_dataset.dataset('./data_dset/petaporon.dset')
colordata = colors.colorset(dataset.colorset_e_list('inst', 'main'))
for instnum in range(10):
cvpj_notelists[instnum] = []
pluginid = plugins.get_id()
instid = 'petaporon'+str(instnum)
tracks_r.track_create(cvpj_l, instid, 'instrument')
tracks_r.track_visual(cvpj_l, instid, name='Inst #'+str(instnum+1), color=peta_colors[instnum])
tracks_r.track_visual(cvpj_l, instid, name='Inst #'+str(instnum+1), color=colordata.getcolornum(instnum))
tracks_r.track_inst_pluginid(cvpj_l, instid, pluginid)
if instnum in [0,1,2,3,4,7]: plugins.add_plug(cvpj_l, pluginid, 'retro', 'square')
if instnum in [5,6]: plugins.add_plug(cvpj_l, pluginid, 'retro', 'triangle')
if instnum in [8]: plugins.add_plug(cvpj_l, pluginid, 'retro', 'noise')
if instnum in [0,1]: plugins.add_plug_data(cvpj_l, pluginid, 'duty', 2)
if instnum in [2,7]: plugins.add_plug_data(cvpj_l, pluginid, 'duty', 1)
if instnum in [3,4,5,6]: plugins.add_plug_data(cvpj_l, pluginid, 'duty', 0)
if instnum in [0,1,2,3,4,7]: inst_plugindata = plugins.cvpj_plugin('deftype','retro', 'square')
if instnum in [5,6]: inst_plugindata = plugins.cvpj_plugin('deftype','retro', 'triangle')
if instnum in [8]: inst_plugindata = plugins.cvpj_plugin('deftype','retro', 'noise')
if instnum in [0,1]: inst_plugindata.dataval_add('duty', 2)
if instnum in [2,7]: inst_plugindata.dataval_add('duty', 1)
if instnum in [3,4,5,6]: inst_plugindata.dataval_add('duty', 0)
if instnum == 0: plugins.add_asdr_env(cvpj_l, pluginid, 'vol', 0, 0, 0, 0.1, 0, 0, 1)
if instnum == 1: plugins.add_asdr_env(cvpj_l, pluginid, 'vol', 0, 0, 0, 0.1, 0.7, 0, 1)
if instnum == 2: plugins.add_asdr_env(cvpj_l, pluginid, 'vol', 0, 0, 0, 0.25, 0, 0, 1)
if instnum == 3: plugins.add_asdr_env(cvpj_l, pluginid, 'vol', 0, 0, 0, 0.2, 0, 0, 1)
if instnum == 4: plugins.add_asdr_env(cvpj_l, pluginid, 'vol', 0, 0, 0, 0, 1, 0, 1)
if instnum == 5: plugins.add_asdr_env(cvpj_l, pluginid, 'vol', 0, 0, 0, 0, 1, 0, 1)
if instnum == 6: plugins.add_asdr_env(cvpj_l, pluginid, 'vol', 0, 0, 0, 0.2, 0, 0, 1)
if instnum == 7: plugins.add_asdr_env(cvpj_l, pluginid, 'vol', 0, 0.3, 0, 0.3, 0.2, 0.3, 1)
if instnum == 8: plugins.add_asdr_env(cvpj_l, pluginid, 'vol', 0, 0, 0, 0.4, 0, 0, 1)
if instnum == 0: inst_plugindata.asdr_env_add('vol', 0, 0, 0, 0.1, 0, 0, 1)
if instnum == 1: inst_plugindata.asdr_env_add('vol', 0, 0, 0, 0.1, 0.7, 0, 1)
if instnum == 2: inst_plugindata.asdr_env_add('vol', 0, 0, 0, 0.25, 0, 0, 1)
if instnum == 3: inst_plugindata.asdr_env_add('vol', 0, 0, 0, 0.2, 0, 0, 1)
if instnum == 4: inst_plugindata.asdr_env_add('vol', 0, 0, 0, 0, 1, 0, 1)
if instnum == 5: inst_plugindata.asdr_env_add('vol', 0, 0, 0, 0, 1, 0, 1)
if instnum == 6: inst_plugindata.asdr_env_add('vol', 0, 0, 0, 0.2, 0, 0, 1)
if instnum == 7: inst_plugindata.asdr_env_add('vol', 0, 0.3, 0, 0.3, 0.2, 0.3, 1)
if instnum == 8: inst_plugindata.asdr_env_add('vol', 0, 0, 0, 0.4, 0, 0, 1)
inst_plugindata.to_cvpj(cvpj_l, pluginid)
for _ in range(len(peta_noteints)//5):
partdata = bio_peta_notebytes.read(5)

View File

@ -11,10 +11,10 @@ from functions import placement_data
from functions import plugins
from functions import song
from functions import note_data
from functions import colors
from functions import data_dataset
from functions_tracks import tracks_r
track_colors = [[0.25, 0.38, 0.49], [0.36, 0.43, 0.46], [0.51, 0.57, 0.47], [0.58, 0.64, 0.40]]
class input_piyopiyo(plugin_input.base):
def __init__(self): pass
def is_dawvert_plugin(self): return 'input'
@ -47,13 +47,14 @@ class input_piyopiyo(plugin_input.base):
recordspertrack = int.from_bytes(pmdfile.read(4), "little")
print("[input-piyopiyo] Records Per Track: " + str(recordspertrack))
#samplefolder = extra_param['samplefolder']
pmdtrackdata = []
keyoffset = [0,0,0,0]
cvpj_l = {}
dataset = data_dataset.dataset('./data_dset/piyopiyo.dset')
colordata = colors.colorset(dataset.colorset_e_list('inst', 'main'))
for tracknum in range(3):
print("[input-piyopiyo] Track " + str(tracknum+1), end=",")
trk_octave = pmdfile.read(1)[0]
@ -79,7 +80,7 @@ class input_piyopiyo(plugin_input.base):
inst_plugindata.to_cvpj(cvpj_l, pluginid)
tracks_r.track_create(cvpj_l, idval, 'instrument')
tracks_r.track_visual(cvpj_l, idval, name='Inst #'+str(tracknum), color=track_colors[tracknum])
tracks_r.track_visual(cvpj_l, idval, name='Inst #'+str(tracknum), color=colordata.getcolornum(tracknum))
tracks_r.track_inst_pluginid(cvpj_l, idval, pluginid)
tracks_r.track_param_add(cvpj_l, idval, 'vol', trk_volume/250, 'float')
@ -89,7 +90,7 @@ class input_piyopiyo(plugin_input.base):
inst_plugindata.to_cvpj(cvpj_l, "3")
tracks_r.track_create(cvpj_l, "3", 'instrument')
tracks_r.track_visual(cvpj_l, "3", name='perc', color=track_colors[3])
tracks_r.track_visual(cvpj_l, "3", name='perc', color=colordata.getcolornum(3))
tracks_r.track_inst_pluginid(cvpj_l, "3", "3")
tracks_r.track_param_add(cvpj_l, "3", 'vol', TrackPVol/250, 'float')

View File

@ -23,19 +23,22 @@ import math
def get_paramval(i_params, i_name):
outval = 0
automation = []
if i_name in i_params:
if 'value' in i_params[i_name]:
outval = i_params[i_name]['value']
return outval
if 'value' in i_params[i_name]: outval = i_params[i_name]['value']
if 'automation' in i_params[i_name]: automation = i_params[i_name]['automation']
return outval, automation
def get_param(i_name, plugindata, i_params):
plugindata.param_add(i_name, get_paramval(i_params, i_name), 'float', i_name)
value_out, automation = get_paramval(i_params, i_name)
plugindata.param_add(i_name, value_out, 'float', i_name)
return automation
def get_asdr(pluginid, plugindata, sound_instdata):
asdr_a = get_paramval(sound_instdata, 'attack')
asdr_s = get_paramval(sound_instdata, 'sustain')
asdr_d = get_paramval(sound_instdata, 'decay')
asdr_r = get_paramval(sound_instdata, 'release')
asdr_a = get_paramval(sound_instdata, 'attack')[0]
asdr_s = get_paramval(sound_instdata, 'sustain')[0]
asdr_d = get_paramval(sound_instdata, 'decay')[0]
asdr_r = get_paramval(sound_instdata, 'release')[0]
plugindata.asdr_env_add('vol', 0, asdr_a, 0, asdr_d, asdr_s, asdr_r, 1)
def parse_clip_notes(sndstat_clip):
@ -62,26 +65,40 @@ def parse_clip_notes(sndstat_clip):
cvpj_pldata["duration"] = sndstat_clip_loopduration
cvpj_pldata['cut'] = placement_data.cutloopdata(-sndstat_clip_contentPosition, -sndstat_clip_contentPosition, sndstat_clip_duration)
for sndstat_note in sndstat_clip['notes']:
cvpj_notelist.append(
note_data.rx_makenote(
sndstat_note['position']/ticksdiv,
sndstat_note['length']/ticksdiv,
sndstat_note['note']-60,
sndstat_note['velocity'],
None)
)
cvpj_pldata["notelist"] = cvpj_notelist
cvpj_notelist = note_data.notelist(ticksdiv*4, None)
for sndstat_note in sndstat_clip['notes']: cvpj_notelist.add_r(sndstat_note['position'], sndstat_note['length'], sndstat_note['note']-60, sndstat_note['velocity'], None)
cvpj_pldata["notelist"] = cvpj_notelist.to_cvpj()
placement_data.unminus(cvpj_pldata)
return cvpj_pldata
def sngauto_to_cvpjauto(autopoints):
sngauto = []
for autopoint in autopoints:
sngauto.append({"position": autopoint['pos']/ticksdiv, "value": autopoint['value']})
sngauto.append({"position": autopoint['pos']//ticksdiv, "value": float(autopoint['value'])})
return sngauto
def autoall_cvpj_to_sng(sng_device, cvpj_plugindata, fxpluginname):
paramlist = dataset.params_list('plugin', fxpluginname)
if paramlist:
for paramid in paramlist:
outval, outauto = get_paramval(sng_device, paramid)
cvpj_plugindata.param_add_dset(paramid, outval, dataset, 'plugin', fxpluginname)
if outauto not in [None, []]: auto_data.add_pl(cvpj_l, 'float', ['plugin',fxpluginid,paramid], auto_nopl.to_pl(sngauto_to_cvpjauto(outauto)))
def eq_calc_q(band_type, q_val):
if band_type in ['low_pass', 'high_pass']:
q_val = q_val*math.log(162)
q_val = 0.1 * math.exp(q_val)
q_val = xtramath.logpowmul(q_val, 0.5)
elif band_type in ['low_shelf', 'high_shelf']:
q_val = q_val*math.log(162)
q_val = 0.1 * math.exp(q_val)
else:
q_val = q_val*math.log(162)
#q_val = 0.1 * math.exp(q_val)
q_val = xtramath.logpowmul(q_val, -1)
return q_val
class input_soundation(plugin_input.base):
def __init__(self): pass
def is_dawvert_plugin(self): return 'input'
@ -97,6 +114,7 @@ class input_soundation(plugin_input.base):
def parse(self, input_file, extra_param):
global cvpj_l
global ticksdiv
global dataset
bytestream = open(input_file, 'r')
sndstat_data = json.load(bytestream)
@ -170,17 +188,17 @@ class input_soundation(plugin_input.base):
get_asdr(pluginid, inst_plugindata, sound_instdata)
v_gain = get_paramval(sound_instdata, 'gain')
v_gain = get_paramval(sound_instdata, 'gain')[0]
inst_plugindata.dataval_add('gain', v_gain)
v_start = get_paramval(sound_instdata, 'start')
v_end = get_paramval(sound_instdata, 'end')
v_start = get_paramval(sound_instdata, 'start')[0]
v_end = get_paramval(sound_instdata, 'end')[0]
inst_plugindata.dataval_add('start', v_start)
inst_plugindata.dataval_add('end', v_end)
v_loop_mode = get_paramval(sound_instdata, 'loop_mode')
v_loop_start = get_paramval(sound_instdata, 'loop_start')
v_loop_end = get_paramval(sound_instdata, 'loop_end')
v_loop_mode = get_paramval(sound_instdata, 'loop_mode')[0]
v_loop_start = get_paramval(sound_instdata, 'loop_start')[0]
v_loop_end = get_paramval(sound_instdata, 'loop_end')[0]
cvpj_loopdata = {}
if v_loop_mode != 0 :
@ -191,18 +209,18 @@ class input_soundation(plugin_input.base):
inst_plugindata.dataval_add('loop', cvpj_loopdata)
inst_plugindata.dataval_add('point_value_type', "percent")
v_coarse = (get_paramval(sound_instdata, 'coarse')-0.5)*2
v_fine = (get_paramval(sound_instdata, 'fine')-0.5)*2
v_root_note = get_paramval(sound_instdata, 'root_note')
v_coarse = (get_paramval(sound_instdata, 'coarse')[0]-0.5)*2
v_fine = (get_paramval(sound_instdata, 'fine')[0]-0.5)*2
v_root_note = get_paramval(sound_instdata, 'root_note')[0]
tracks_r.track_param_add(cvpj_l, trackid, 'pitch', v_coarse*48 + v_fine, 'float')
tracks_r.track_dataval_add(cvpj_l, trackid, 'instdata', 'middlenote', v_root_note-60)
v_crossfade = get_paramval(sound_instdata, 'crossfade')
v_playback_direction = get_paramval(sound_instdata, 'playback_direction')
v_interpolation_mode = get_paramval(sound_instdata, 'interpolation_mode')
v_release_mode = get_paramval(sound_instdata, 'release_mode')
v_portamento_time = get_paramval(sound_instdata, 'portamento_time')
v_crossfade = get_paramval(sound_instdata, 'crossfade')[0]
v_playback_direction = get_paramval(sound_instdata, 'playback_direction')[0]
v_interpolation_mode = get_paramval(sound_instdata, 'interpolation_mode')[0]
v_release_mode = get_paramval(sound_instdata, 'release_mode')[0]
v_portamento_time = get_paramval(sound_instdata, 'portamento_time')[0]
if v_interpolation_mode == 0: cvpj_interpolation = "none"
if v_interpolation_mode == 1: cvpj_interpolation = "linear"
@ -211,7 +229,7 @@ class input_soundation(plugin_input.base):
elif instpluginname == 'com.soundation.drummachine':
inst_plugindata = plugins.cvpj_plugin('deftype', 'native-soundation', instpluginname)
kit_name = get_paramval(sound_instdata, 'kit_name')
kit_name = get_paramval(sound_instdata, 'kit_name')[0]
for paramname in ["gain_2", "hold_1", "pitch_6", "gain_1", "decay_5", "gain_5", "hold_0", "hold_2", "pitch_7", "gain_0", "decay_6", "gain_3", "hold_5", "pitch_3", "decay_4", "pitch_4", "gain_6", "decay_7", "pitch_2", "hold_6", "decay_1", "decay_3", "decay_0", "decay_2", "gain_7", "pitch_0", "pitch_5", "hold_3", "pitch_1", "hold_4", "hold_7", "gain_4"]:
get_param(paramname, inst_plugindata, sound_instdata)
inst_plugindata.dataval_add('kit_name', kit_name)
@ -221,15 +239,17 @@ class input_soundation(plugin_input.base):
paramlist = dataset_synth_nonfree.params_list('plugin', 'europa')
for paramid in paramlist:
outval = None
if paramid in sound_instdata:
if 'value' in sound_instdata[paramid]: outval = sound_instdata[paramid]['value']
param = dataset_synth_nonfree.params_i_get('plugin', 'europa', paramid)
sng_paramid = "/custom_properties/"+param[5]
if sng_paramid in sound_instdata:
if 'value' in sound_instdata[sng_paramid]: outval = sound_instdata[sng_paramid]['value']
inst_plugindata.param_add_dset(paramid, outval, dataset_synth_nonfree, 'plugin', 'europa')
elif instpluginname == 'com.soundation.GM-2':
inst_plugindata = plugins.cvpj_plugin('deftype', 'native-soundation', instpluginname)
get_asdr(pluginid, inst_plugindata, sound_instdata)
if 'value' in sound_instdata['sample_pack']:
sample_pack = get_paramval(sound_instdata, 'sample_pack')
sample_pack = get_paramval(sound_instdata, 'sample_pack')[0]
inst_plugindata.dataval_add('sample_pack', sample_pack)
elif instpluginname == 'com.soundation.noiser':
@ -260,13 +280,13 @@ class input_soundation(plugin_input.base):
elif instpluginname == 'com.soundation.simple':
inst_plugindata = plugins.cvpj_plugin('deftype', 'native-soundation', instpluginname)
get_asdr(pluginid, inst_plugindata, sound_instdata)
asdrf_a = get_paramval(sound_instdata, 'filter_attack')
asdrf_s = get_paramval(sound_instdata, 'filter_decay')
asdrf_d = get_paramval(sound_instdata, 'filter_sustain')
asdrf_r = get_paramval(sound_instdata, 'filter_release')
asdrf_i = get_paramval(sound_instdata, 'filter_int')
asdrf_a = get_paramval(sound_instdata, 'filter_attack')[0]
asdrf_s = get_paramval(sound_instdata, 'filter_decay')[0]
asdrf_d = get_paramval(sound_instdata, 'filter_sustain')[0]
asdrf_r = get_paramval(sound_instdata, 'filter_release')[0]
asdrf_i = get_paramval(sound_instdata, 'filter_int')[0]
inst_plugindata.asdr_env_add('cutoff', 0, asdrf_a, 0, asdrf_d, asdrf_s, asdrf_r, asdrf_i)
filter_cutoff = xtramath.between_from_one(20, 7500, get_paramval(sound_instdata, 'filter_cutoff'))
filter_cutoff = xtramath.between_from_one(20, 7500, get_paramval(sound_instdata, 'filter_cutoff')[0])
filter_reso = get_paramval(sound_instdata, 'filter_resonance')
inst_plugindata.filter_add(True, filter_cutoff, filter_reso, 'lowpass', None)
for oscnum in range(4):
@ -281,10 +301,12 @@ class input_soundation(plugin_input.base):
fxpluginname = sound_chan_effect['identifier']
fxenabled = not sound_chan_effect['bypass']
fxslot.insert(cvpj_l, ['master'] if ismaster else ['track', trackid], 'audio', fxpluginid)
if fxpluginname == 'com.soundation.parametric-eq':
fx_plugindata = plugins.cvpj_plugin('deftype', 'universal', 'eq-bands')
fx_plugindata.fxdata_add(fxenabled, 1)
bandnum = 1
for eqname in ["highshelf","hpf","lowshelf","lpf","peak1","peak2","peak3","peak4"]:
eq_bandtype = 'peak'
@ -293,29 +315,31 @@ class input_soundation(plugin_input.base):
if eqname == 'lowshelf': eq_bandtype = 'low_shelf'
if eqname == 'lpf': eq_bandtype = 'low_pass'
band_enable = get_paramval(sound_chan_effect, eqname+'_enable')
band_freq = get_paramval(sound_chan_effect, eqname+'_freq')
band_gain = get_paramval(sound_chan_effect, eqname+'_gain')
band_res = get_paramval(sound_chan_effect, eqname+'_res')
band_enable, auto_enable = get_paramval(sound_chan_effect, eqname+'_enable')
band_freq, auto_freq = get_paramval(sound_chan_effect, eqname+'_freq')
band_gain, auto_gain = get_paramval(sound_chan_effect, eqname+'_gain')
band_res, auto_res = get_paramval(sound_chan_effect, eqname+'_res')
band_freq = 20 * 1000**band_freq
band_gain = (band_gain-0.5)*40
if eq_bandtype in ['low_pass', 'high_pass']:
band_res = band_res*math.log(162)
band_res = 0.1 * math.exp(band_res)
band_res = xtramath.logpowmul(band_res, 0.5)
elif eq_bandtype in ['low_shelf', 'high_shelf']:
band_res = band_res*math.log(162)
band_res = 0.1 * math.exp(band_res)
else:
band_res = band_res*math.log(162)
band_res = 0.1 * math.exp(band_res)
band_res = xtramath.logpowmul(band_res, -1)
band_res = eq_calc_q(eq_bandtype, band_res)
fx_plugindata.eqband_add(int(band_enable), band_freq, band_gain, eq_bandtype, band_res, None)
if auto_enable:
auto_data.add_pl(cvpj_l, 'float', ['plugin_eq',fxpluginid,str(bandnum)+'_on'], auto_nopl.to_pl(sngauto_to_cvpjauto(auto_enable)))
if auto_freq:
for point in auto_freq: point['value'] = 20 * 1000**point['value']
auto_data.add_pl(cvpj_l, 'float', ['plugin_eq',fxpluginid,str(bandnum)+'_freq'], auto_nopl.to_pl(sngauto_to_cvpjauto(auto_freq)))
if auto_gain:
for point in auto_gain: point['value'] = (point['value']-0.5)*40
auto_data.add_pl(cvpj_l, 'float', ['plugin_eq',fxpluginid,str(bandnum)+'_gain'], auto_nopl.to_pl(sngauto_to_cvpjauto(auto_gain)))
if auto_res:
for point in auto_res: point['value'] = eq_calc_q(eq_bandtype, point['value'])
auto_data.add_pl(cvpj_l, 'float', ['plugin_eq',fxpluginid,str(bandnum)+'_res'], auto_nopl.to_pl(sngauto_to_cvpjauto(auto_res)))
master_gain = get_paramval(sound_chan_effect, 'master_gain')
fx_plugindata.eqband_add(int(band_enable), band_freq, band_gain, eq_bandtype, band_res, None)
bandnum += 1
master_gain = get_paramval(sound_chan_effect, 'master_gain')[0]
master_gain = (master_gain-0.5)*40
fx_plugindata.param_add('gain_out', master_gain, 'float', 'Out Gain')
@ -323,13 +347,7 @@ class input_soundation(plugin_input.base):
fx_plugindata = plugins.cvpj_plugin('deftype', 'native-soundation', fxpluginname)
fx_plugindata.fxdata_add(fxenabled, 1)
paramlist = dataset.params_list('plugin', fxpluginname)
if paramlist:
for paramid in paramlist:
outval = None
if paramid in sound_chan_effect:
if 'value' in sound_chan_effect[paramid]: outval = sound_chan_effect[paramid]['value']
fx_plugindata.param_add_dset(paramid, outval, dataset, 'plugin', fxpluginname)
autoall_cvpj_to_sng(sound_chan_effect, fx_plugindata, fxpluginname)
fx_plugindata.to_cvpj(cvpj_l, fxpluginid)
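
Two things change in the soundation input above: get_paramval now returns a (value, automation) pair, so every caller indexes [0] for the plain value, and the parametric-EQ import converts each band's normalized parameters to real units before eqband_add (and before the new per-band automation placements are written). The frequency and gain mappings taken from that hunk are simple enough to check numerically; the function names below are placeholders, not repo code:

def eq_freq(x): return 20 * 1000 ** x      # normalized 0..1 -> 20 Hz .. 20 kHz
def eq_gain(x): return (x - 0.5) * 40      # normalized 0..1 -> -20 .. +20 gain

print(eq_freq(0.0), eq_freq(0.5), eq_freq(1.0))   # 20.0  ~632.46  20000.0
print(eq_gain(0.25), eq_gain(0.75))               # -10.0  10.0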

View File

@ -36,18 +36,14 @@ def parse_clip_notes(j_wvtl_trackclip, j_wvtl_tracktype):
cvpj_pldata["duration"] = j_wvtl_trc_timelineEnd*4 - j_wvtl_trc_timelineStart*4
cvpj_pldata['cut'] = placement_data.cutloopdata(j_wvtl_trc_readStart*4, j_wvtl_trc_loopStart*4, j_wvtl_trc_loopEnd*4)
cvpj_notelist = note_data.notelist(1, None)
if j_wvtl_trc_type == 'MIDI':
if 'notes' in j_wvtl_trackclip:
for j_wvtl_n in j_wvtl_trackclip['notes']:
cvpj_notelist.append(
note_data.rx_makenote(
j_wvtl_n['start']*4,
j_wvtl_n['end']*4 - j_wvtl_n['start']*4,
j_wvtl_n['pitch']-60,
j_wvtl_n['velocity'],
None))
cvpj_notelist.add_r(j_wvtl_n['start'], j_wvtl_n['end']-j_wvtl_n['start'], j_wvtl_n['pitch']-60, j_wvtl_n['velocity'], None)
cvpj_pldata["notelist"] = cvpj_notelist
cvpj_pldata["notelist"] = cvpj_notelist.to_cvpj()
return cvpj_pldata
# -------------------------------------------- audio --------------------------------------------
@ -99,8 +95,6 @@ def parse_clip_audio(j_wvtl_trackclip, j_wvtl_tracktype):
cvpj_pldata['cut']['loopstart'] = j_wvtl_trc_loopStart*4
cvpj_pldata['cut']['loopend'] = j_wvtl_trc_loopEnd*4
#print( j_wvtl_trc_transpose, pow(2, j_wvtl_trc_transpose/12) )
cvpj_pldata['audiomod'] = {}
cvpj_pldata['audiomod']['stretch_algorithm'] = 'beats'
cvpj_pldata['audiomod']['pitch'] = j_wvtl_trc_transpose

View File

@ -7,6 +7,8 @@ from functions import note_data
from functions import placement_data
from functions import plugins
from functions import song
from functions import colors
from functions import data_dataset
from functions_tracks import tracks_rm
from functions_tracks import tracks_master
from functions_tracks import fxslot
@ -15,19 +17,6 @@ import base64
import json
import zlib
onebd_colors = [
[0.14, 1.00, 0.60],
[1.00, 0.87, 0.18],
[0.76, 0.41, 1.00],
[0.97, 0.51, 0.00],
[0.76, 0.76, 0.76],
[0.76, 0.58, 0.38],
[0.47, 0.49, 0.88],
[0.59, 0.73, 0.23],
[0.88, 0.35, 0.53]
]
def tnotedata_to_cvpj_nl(cvpj_notelist, instid, in_notedata, note):
for tnote in in_notedata:
duration = tnote[1]['duration'] if 'duration' in tnote[1] else 1
@ -95,7 +84,7 @@ def decodeblock(cvpj_l, input_block, position):
if blockinstid not in used_instruments: used_instruments.append(blockinstid)
placementdata = placement_data.makepl_n(position, pl_dur, notelist)
placementdata['name'] = instdata[instnum]['preset']
placementdata['color'] = onebd_colors[instnum]
placementdata['color'] = colordata.getcolornum(instnum)
longpldata = placement_data.longpl_split(placementdata)
for longpls in longpldata:
tracks_rm.add_pl(cvpj_l, instnum+1, 'notes', longpls)
@ -108,7 +97,7 @@ def decodeblock(cvpj_l, input_block, position):
if blockdrumid not in used_instruments: used_instruments.append(blockdrumid)
placementdata = placement_data.makepl_n(position, pl_dur, notelist)
placementdata['name'] = drumsdata[drumnum]['preset']
placementdata['color'] = onebd_colors[drumnumminv+4]
placementdata['color'] = colordata.getcolornum(drumnumminv+4)
longpldata = placement_data.longpl_split(placementdata)
for longpls in longpldata:
tracks_rm.add_pl(cvpj_l, drumnumminv+5, 'notes', longpls)
@ -129,6 +118,7 @@ class input_1bitdragon(plugin_input.base):
global used_instruments
global used_instrument_data
global cvpj_scale
global colordata
song_file = open(input_file, 'r')
basebase64stream = base64.b64decode(song_file.read())
@ -136,6 +126,9 @@ class input_1bitdragon(plugin_input.base):
bio_base64stream.seek(4)
decompdata = json.loads(zlib.decompress(bio_base64stream.read(), 16+zlib.MAX_WBITS))
dataset = data_dataset.dataset('./data_dset/1bitdragon.dset')
colordata = colors.colorset(dataset.colorset_e_list('track', 'main'))
cvpj_l = {}
used_instruments = []
used_instrument_data = {}
@ -160,7 +153,7 @@ class input_1bitdragon(plugin_input.base):
for plnum in range(9):
tracks_rm.track_create(cvpj_l, str(plnum+1), 'instruments')
tracks_rm.track_visual(cvpj_l, str(plnum+1), color=onebd_colors[plnum])
tracks_rm.track_visual(cvpj_l, str(plnum+1), color=colordata.getcolornum(plnum))
curpos = 0
for blocknum in range(len(decompdata['blocks'])):

View File

@ -6,6 +6,7 @@ from functions import placement_data
from functions import data_dataset
from functions import auto
from functions import data_bytes
from functions import data_values
from functions import plugins
from functions import song
from functions_tracks import tracks_rm
@ -95,12 +96,6 @@ def parsetrack_float(file_stream, i_mul, i_add):
track_rol_events.append(track_rol_part)
return track_name, track_rol_events
def closest(myList, in_value):
outval = 0
for num in myList:
if num <= in_value: outval = num
return outval
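The local closest helper above (replaced here by data_values.closest) returns the largest list element that does not exceed the given position, which is how the timbre change nearest to, but not after, the current track position is picked below. A quick check using the definition shown above:

timbrepoints = [0, 16, 32, 48]
assert closest(timbrepoints, 20) == 16    # last timbre change at or before position 20
assert closest(timbrepoints, 0) == 0
assert closest(timbrepoints, 100) == 48   # past the end: the final entry wins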
def parsetrack(file_stream, tracknum, notelen):
rol_tr_voice = parsetrack_voice(file_stream)
rol_tr_timbre = parsetrack_timbre(file_stream)
@ -117,7 +112,7 @@ def parsetrack(file_stream, tracknum, notelen):
curtrackpos = 0
for rol_notedata in rol_tr_voice[1]:
if rol_notedata[0] >= 12:
cvpj_noteinst = rol_tr_timbre[1][closest(timbrepoints, curtrackpos)]
cvpj_noteinst = rol_tr_timbre[1][data_values.closest(timbrepoints, curtrackpos)]
cvpj_notelist.append(note_data.mx_makenote(cvpj_noteinst.upper(), curtrackpos*notelen, rol_notedata[1]*notelen, rol_notedata[0]-48, None, None))
curtrackpos += rol_notedata[1]
@ -247,23 +242,17 @@ class input_adlib_rol(plugin_input.base):
print("[input-adlib_rol] cVolumeEvents: " + str(rol_header_cVolumeEvents))
print("[input-adlib_rol] cPitchEvents: " + str(rol_header_cPitchEvents))
print("[input-adlib_rol] cTempoEvents: " + str(rol_header_cTempoEvents))
song_file.read(38) #Padding
notelen = (2/rol_header_tickBeat)*2
t_tempo_data = parsetrack_tempo(song_file, notelen)
auto_nopl.twopoints(['main', 'bpm'], 'float', t_tempo_data[2], notelen, 'instant')
for tracknum in range(10):
parsetrack(song_file, tracknum, (2/rol_header_tickBeat)*2)
for tracknum in range(10): parsetrack(song_file, tracknum, (2/rol_header_tickBeat)*2)
auto_nopl.to_cvpj(cvpj_l)
cvpj_l['do_addloop'] = True
cvpj_l['do_singlenotelistcut'] = True
song.add_timesig(cvpj_l, rol_header_beatMeasure, 4)
song.add_param(cvpj_l, 'bpm', t_tempo_data[1])
return json.dumps(cvpj_l)

View File

@ -5,12 +5,14 @@ import plugin_input
import json
import os.path
import struct
from functions import placements
from functions import placement_data
from functions import auto
from functions import note_data
from functions import colors
from functions import data_bytes
from functions import data_dataset
from functions import data_values
from functions import note_data
from functions import placement_data
from functions import placements
from functions import plugins
from functions import song
from functions_tracks import tracks_rm
@ -257,8 +259,6 @@ def lc_parse_placements(sl_json, tracknum, pl_color, ischord):
position += length
return placements
lc_colors = [[0.83, 0.09, 0.42],[0.91, 0.76, 0.36],[0.22, 0.36, 0.60],[0.44, 0.78, 0.66],[0.64, 0.64, 0.64]]
class input_lc(plugin_input.base):
def __init__(self): pass
def is_dawvert_plugin(self): return 'input'
@ -286,8 +286,11 @@ class input_lc(plugin_input.base):
cvpj_l = {}
dataset = data_dataset.dataset('./data_dset/lovelycomposer.dset')
colordata = colors.colorset(dataset.colorset_e_list('track', 'main'))
for num in range(5):
cvpj_placements = lc_parse_placements(lc_channels[num]["sl"], num, lc_colors[num], num == 4)
cvpj_placements = lc_parse_placements(lc_channels[num]["sl"], num, colordata.getcolornum(num), num == 4)
cvpj_plname = "Part "+str(num+1) if num != 4 else "Chord"
tracks_rm.track_create(cvpj_l, str(num), 'instruments')
tracks_rm.track_visual(cvpj_l, str(num), name=cvpj_plname)
@ -324,11 +327,11 @@ class input_lc(plugin_input.base):
plugindata.to_cvpj(cvpj_l, cvpj_instid)
tracks_rm.inst_create(cvpj_l, cvpj_instid)
tracks_rm.inst_visual(cvpj_l, cvpj_instid, name=used_instrument[1], color=lc_colors[used_instrument[0]])
tracks_rm.inst_visual(cvpj_l, cvpj_instid, name=used_instrument[1], color=colordata.getcolornum(used_instrument[0]))
tracks_rm.inst_pluginid(cvpj_l, cvpj_instid, cvpj_instid)
tracks_rm.inst_create(cvpj_l, 'chord')
tracks_rm.inst_visual(cvpj_l, 'chord', name='Chord', color=lc_colors[4])
tracks_rm.inst_visual(cvpj_l, 'chord', name='Chord', color=colordata.getcolornum(4))
startinststr = 'lc_instlist_'

View File

@ -8,6 +8,8 @@ from functions import plugins
from functions import placement_data
from functions import note_data
from functions import song
from functions import colors
from functions import data_dataset
from functions_tracks import tracks_rm
from functions_tracks import auto_nopl
import plugin_input
@ -30,29 +32,8 @@ ptcop_events[13] = 'Group # '
ptcop_events[14] = 'Key Corr'
ptcop_events[15] = 'Pan Time'
colors_inst = [
[0.94, 0.50, 0.00],
[0.41, 0.47, 1.00],
[0.79, 0.72, 0.72],
[0.68, 0.25, 1.00],
[0.57, 0.78, 0.00],
[0.99, 0.20, 0.80],
[0.00, 0.75, 0.38],
[1.00, 0.47, 0.36],
[0.00, 0.74, 1.00]]
global colornum
colornum = 0
def get_float(in_int): return struct.unpack("<f", struct.pack("I", in_int))[0]
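get_float reinterprets the raw bits of a 32-bit integer as an IEEE 754 single-precision float; for example (on a little-endian host, since the pack uses native byte order while the unpack is explicitly little-endian):

assert get_float(0x3F800000) == 1.0     # 0x3F800000 is the float32 bit pattern of 1.0
assert get_float(0x42C80000) == 100.0   # 0x42C80000 is the float32 bit pattern of 100.0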
def getcolor():
global colornum
out_color = colors_inst[colornum]
colornum += 1
if colornum == 9: colornum = 0
return out_color
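colors.colorset itself is not shown in this diff; judging from the helpers it replaces (getcolor above, plus the getcolornum calls in the 1BitDragon and Lovely Composer inputs), a minimal sketch of the behaviour it is assumed to provide would be:

class colorset:
    # assumed behaviour, inferred from the removed helpers: wrap a list of
    # [r, g, b] values, allow indexed lookup, and cycle through it in order
    def __init__(self, colorlist):
        self.colorlist = colorlist
        self.num = 0

    def getcolornum(self, num):
        # fixed lookup, wrapping past the end of the list
        return self.colorlist[num % len(self.colorlist)]

    def getcolor(self):
        # sequential lookup, advancing an internal counter
        out = self.colorlist[self.num]
        self.num = (self.num + 1) % len(self.colorlist)
        return out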
def parse_event(bio_stream):
position = varint.decode_stream(bio_stream)
unitnum = int.from_bytes(bio_stream.read(1), "little")
@ -191,6 +172,9 @@ class input_pxtone(plugin_input.base):
song_filesize = song_file.tell()
song_file.seek(0)
dataset = data_dataset.dataset('./data_dset/pxtone.dset')
colordata = colors.colorset(dataset.colorset_e_list('track', 'main'))
ptcop_header = song_file.read(16)
ptcop_unk = int.from_bytes(song_file.read(4), "little")
ptcop_unit_events = {}
@ -445,7 +429,7 @@ class input_pxtone(plugin_input.base):
cvpj_trackparams = trackautop[unitnum]
for cvpj_note in cvpj_notelist: note_mod.notemod_conv(cvpj_note)
plt_name = ptcop_name_unit[unitnum] if unitnum in ptcop_name_unit else None
cvpj_instcolor = getcolor()
cvpj_instcolor = colordata.getcolor()
cvpj_trackid = str(unitnum+1)
tracks_rm.track_create(cvpj_l, cvpj_trackid, 'instruments')
tracks_rm.track_visual(cvpj_l, cvpj_trackid, name=plt_name, color=cvpj_instcolor)

View File

@ -701,7 +701,7 @@ def lmms_encode_audio_track(xmltag, trkJ, trackid, trkplacementsJ):
auto_add_track(trkX_samptr, 'pan', 'pan', 0, [0, 100], trackid, trackname, 'Pan')
auto_add_track(xmltag, 'enabled', 'muted', 1, [-1, -1], trackid, trackname, 'On')
trkX_samptr.set('fxch', str(tracks_r.track_fxrackchan_get(cvpj_l, trackid)))
trkX_samptr.set('mixch', str(tracks_r.track_fxrackchan_get(cvpj_l, trackid)))
if 'chain_fx_audio' in trkJ: lmms_encode_fxchain(trkX_samptr, trkJ)
@ -754,7 +754,7 @@ def lmms_encode_effectslot(pluginid, fxcX):
xml_lmmsreverbsc.set('color', '10000')
elif plugintype == ['universal', 'eq-bands']:
data_LP, data_Lowshelf, data_Peaks, data_HighShelf, data_HP = fx_plugindata.eqband_get_limited(None)
data_LP, data_Lowshelf, data_Peaks, data_HighShelf, data_HP, data_auto = fx_plugindata.eqband_get_limited(None)
print('[output-lmms] Audio FX: [eq] ')
fxslotX.set('name', 'eq')

View File

@ -9,6 +9,7 @@ import math
from functions import xtramath
from functions import colors
from functions import data_values
from functions import data_dataset
from functions import plugins
from functions import idvals
from functions import notelist_data
@ -17,7 +18,6 @@ from functions import auto
from functions import song
from functions_compat import trackfx_to_numdata
from functions_plugin import soundation_values
from functions_plugin import synth_nonfree_values
from functions_tracks import auto_nopl
from functions_tracks import tracks_r
@ -36,24 +36,23 @@ def makechannel(i_type):
}
def set_asdr(sng_instparams, asdr_a, asdr_s, asdr_d, asdr_r):
add_sndinstparam(sng_instparams, 'attack', asdr_a, True)
add_sndinstparam(sng_instparams, 'sustain', asdr_s, True)
add_sndinstparam(sng_instparams, 'decay', asdr_d, True)
add_sndinstparam(sng_instparams, 'release', asdr_r, True)
add_sndinstparam(sng_instparams, 'attack', asdr_a, [])
add_sndinstparam(sng_instparams, 'sustain', asdr_s, [])
add_sndinstparam(sng_instparams, 'decay', asdr_d, [])
add_sndinstparam(sng_instparams, 'release', asdr_r, [])
def add_sndinstparam(i_dict, i_name, i_value, i_auto):
if i_auto == True: i_dict[i_name] = {"value": i_value, "automation": []}
if i_auto != None: i_dict[i_name] = {"value": i_value, "automation": i_auto}
else: i_dict[i_name] = {"value": i_value}
def cvpjidata_to_sngparam(cvpj_plugindata, i_dict, pluginid, i_name, i_fallback):
value = cvpj_plugindata.dataval_get(i_name, i_fallback)
add_sndinstparam(i_dict, i_name, value, False)
add_sndinstparam(i_dict, i_name, value, None)
def cvpjiparam_to_sngparam(cvpj_plugindata, i_dict, pluginid, i_name, i_fallback, i_auto):
value = cvpj_plugindata.param_get(i_name, i_fallback)[0]
add_sndinstparam(i_dict, i_name, value, i_auto)
def eq_calc_pass(i_value):
i_value = xtramath.logpowmul(i_value, 0.5)
i_value = math.log(i_value / 0.1)
@ -65,6 +64,18 @@ def eq_calc_shelf(i_value):
i_value = i_value / math.log(162)
return i_value
def eq_calc_peak(i_value):
i_value = xtramath.logpowmul(i_value, -1)
i_value = math.log( i_value / 0.1)
i_value = i_value / math.log(162)
return i_value
def eq_calc_freq(i_value): return (math.log(i_value/20) / math.log(1000)) if i_value != 0 else 0
def eq_calc_gain(i_value): return (i_value/40)+0.5
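These two one-line encoders are the inverses of the decode formulas used by the Soundation input plugin earlier in this diff (freq_hz = 20 * 1000**x, gain_db = (x - 0.5) * 40); a quick standalone round-trip check, with the two helpers copied from above:

import math

def eq_calc_freq(i_value): return (math.log(i_value/20) / math.log(1000)) if i_value != 0 else 0
def eq_calc_gain(i_value): return (i_value/40)+0.5

for x in (0.1, 0.5, 0.9):
    assert abs(eq_calc_freq(20 * 1000**x) - x) < 1e-9    # decode to Hz, then encode back
    assert abs(eq_calc_gain((x - 0.5) * 40) - x) < 1e-9  # decode to dB, then encode back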
def add_fx(sng_trkdata, s_trackdata):
sng_fxchain = sng_trkdata['effects']
@ -80,14 +91,11 @@ def add_fx(sng_trkdata, s_trackdata):
sng_fxdata['identifier'] = 'com.soundation.parametric-eq'
sng_fxdata['bypass'] = not fx_on
data_LP, data_Lowshelf, data_Peaks, data_HighShelf, data_HP = fx_plugindata.eqband_get_limited(None)
data_LP, data_Lowshelf, data_Peaks, data_HighShelf, data_HP, data_auto = fx_plugindata.eqband_get_limited(None)
for num in range(4):
band_res = data_Peaks[num][4]
if band_res != 0:
band_res = xtramath.logpowmul(band_res, -1)
band_res = math.log( band_res / 0.1)
band_res = band_res / math.log(162)
if band_res != 0: band_res = eq_calc_peak(band_res)
else: band_res = 0
data_Peaks[num][4] = band_res
@ -99,34 +107,48 @@ def add_fx(sng_trkdata, s_trackdata):
gain_out = fx_plugindata.param_get('gain_out', 0)[0]
add_sndinstparam(sng_fxdata, 'highshelf_enable', data_HighShelf[1], True)
add_sndinstparam(sng_fxdata, 'highshelf_freq', data_HighShelf[2], True)
add_sndinstparam(sng_fxdata, 'highshelf_gain', data_HighShelf[3], True)
add_sndinstparam(sng_fxdata, 'highshelf_res', data_HighShelf[4], True)
add_sndinstparam(sng_fxdata, 'lpf_enable', data_LP[1], [])
add_sndinstparam(sng_fxdata, 'lpf_freq', eq_calc_freq(data_LP[2]), [])
add_sndinstparam(sng_fxdata, 'lpf_res', data_LP[4], [])
add_sndinstparam(sng_fxdata, 'lpf_slope', 0.25, [])
add_sndinstparam(sng_fxdata, 'hpf_enable', data_HP[1], True)
add_sndinstparam(sng_fxdata, 'hpf_freq', data_HP[2], True)
add_sndinstparam(sng_fxdata, 'hpf_res', data_HP[4], True)
add_sndinstparam(sng_fxdata, 'hpf_slope', 0.25, True)
add_sndinstparam(sng_fxdata, 'lowshelf_enable', data_Lowshelf[1], [])
add_sndinstparam(sng_fxdata, 'lowshelf_freq', eq_calc_freq(data_Lowshelf[2]), [])
add_sndinstparam(sng_fxdata, 'lowshelf_gain', eq_calc_gain(data_Lowshelf[3]), [])
add_sndinstparam(sng_fxdata, 'lowshelf_res', data_Lowshelf[4], [])
add_sndinstparam(sng_fxdata, 'lowshelf_enable', data_Lowshelf[1], True)
add_sndinstparam(sng_fxdata, 'lowshelf_freq', data_Lowshelf[2], True)
add_sndinstparam(sng_fxdata, 'lowshelf_gain', data_Lowshelf[3], True)
add_sndinstparam(sng_fxdata, 'lowshelf_res', data_Lowshelf[4], True)
add_sndinstparam(sng_fxdata, 'lpf_enable', data_LP[1], True)
add_sndinstparam(sng_fxdata, 'lpf_freq', data_LP[2], True)
add_sndinstparam(sng_fxdata, 'lpf_res', data_LP[4], True)
add_sndinstparam(sng_fxdata, 'lpf_slope', 0.25, True)
#for peaknum in range(4):
# peakstr = str(peaknum+1)
add_sndinstparam(sng_fxdata, 'master_gain', (gain_out/40)+0.5, True)
#peak_auto = [[],[],[],[]]
for peaknum in range(4):
peakstr = str(peaknum+1)
add_sndinstparam(sng_fxdata, 'peak'+peakstr+'_enable', data_Peaks[peaknum][1], True)
add_sndinstparam(sng_fxdata, 'peak'+peakstr+'_freq', data_Peaks[peaknum][2], True)
add_sndinstparam(sng_fxdata, 'peak'+peakstr+'_gain', data_Peaks[peaknum][3], True)
add_sndinstparam(sng_fxdata, 'peak'+peakstr+'_res', data_Peaks[peaknum][4], True)
#if data_auto[2][peaknum] != None:
# auto_peak_enable = auto_nopl.getpoints(cvpj_l, ['plugin_eq',fxpluginid,str(data_auto[2][peaknum])+'_on'])
# auto_peak_freq = auto_nopl.getpoints(cvpj_l, ['plugin_eq',fxpluginid,str(data_auto[2][peaknum])+'_freq'])
# auto_peak_gain = auto_nopl.getpoints(cvpj_l, ['plugin_eq',fxpluginid,str(data_auto[2][peaknum])+'_gain'])
# auto_peak_res = auto_nopl.getpoints(cvpj_l, ['plugin_eq',fxpluginid,str(data_auto[2][peaknum])+'_res'])
#if auto_peak_freq: for point in auto_peak_freq: point['value'] = eq_calc_freq(point['value'])
#if auto_peak_gain: for point in auto_peak_gain: point['value'] = eq_calc_gain(point['value'])
#if auto_peak_res: for point in auto_peak_res: point['value'] = eq_calc_peak(point['value'])
#add_sndinstparam(sng_fxdata, 'peak'+peakstr+'_enable', data_Peaks[peaknum][1], peak_auto[0])
#add_sndinstparam(sng_fxdata, 'peak'+peakstr+'_freq', eq_calc_freq(data_Peaks[peaknum][2]), peak_auto[1])
#add_sndinstparam(sng_fxdata, 'peak'+peakstr+'_gain', eq_calc_gain(data_Peaks[peaknum][3]), peak_auto[2])
#add_sndinstparam(sng_fxdata, 'peak'+peakstr+'_res', data_Peaks[peaknum][4], peak_auto[3])
add_sndinstparam(sng_fxdata, 'highshelf_enable', data_HighShelf[1], [])
add_sndinstparam(sng_fxdata, 'highshelf_freq', eq_calc_freq(data_HighShelf[2]), [])
add_sndinstparam(sng_fxdata, 'highshelf_gain', eq_calc_gain(data_HighShelf[3]), [])
add_sndinstparam(sng_fxdata, 'highshelf_res', data_HighShelf[4], [])
add_sndinstparam(sng_fxdata, 'hpf_enable', data_HP[1], [])
add_sndinstparam(sng_fxdata, 'hpf_freq', eq_calc_freq(data_HP[2]), [])
add_sndinstparam(sng_fxdata, 'hpf_res', data_HP[4], [])
add_sndinstparam(sng_fxdata, 'hpf_slope', 0.25, [])
add_sndinstparam(sng_fxdata, 'master_gain', (gain_out/40)+0.5, [])
sng_fxchain.append(sng_fxdata)
@ -137,22 +159,13 @@ def add_fx(sng_trkdata, s_trackdata):
sng_fxdata['identifier'] = fxpluginname
sng_fxdata['bypass'] = not fx_on
if fxpluginname == 'com.soundation.compressor': snd_params = ['gain','release','ratio','threshold','attack']
elif fxpluginname == 'com.soundation.degrader': snd_params = ['gain','rate','reduction','mix']
elif fxpluginname == 'com.soundation.delay': snd_params = ['dry','feedback','feedback_filter','timeBpmSync','timeL','timeLSynced','timeR','timeRSynced','wet']
elif fxpluginname == 'com.soundation.distortion': snd_params = ['gain','volume','mode']
elif fxpluginname == 'com.soundation.equalizer': snd_params = ['low','mid','high']
elif fxpluginname == 'com.soundation.fakie': snd_params = ['attack','hold','release','depth']
elif fxpluginname == 'com.soundation.filter': snd_params = ['cutoff','resonance','mode']
elif fxpluginname == 'com.soundation.limiter': snd_params = ['attack','gain','release','threshold']
elif fxpluginname == 'com.soundation.parametric-eq': snd_params = ["highshelf_enable", "highshelf_freq", "highshelf_gain", "highshelf_res", "hpf_enable", "hpf_freq", "hpf_res", "hpf_slope", "lowshelf_enable", "lowshelf_freq", "lowshelf_gain", "lowshelf_res", "lpf_enable", "lpf_freq", "lpf_res", "lpf_slope", "master_gain", "peak1_enable", "peak1_freq", "peak1_gain", "peak1_res", "peak2_enable", "peak2_freq", "peak2_gain", "peak2_res", "peak3_enable", "peak3_freq", "peak3_gain", "peak3_res", "peak4_enable", "peak4_freq", "peak4_gain", "peak4_res"]
elif fxpluginname == 'com.soundation.phaser': snd_params = ['rateBpmSync','rateSynced','feedback','rate','range','freq','wet','dry']
elif fxpluginname == 'com.soundation.reverb': snd_params = ['size','damp','width','wet','dry']
elif fxpluginname == 'com.soundation.tremolo': snd_params = ['speed','depth','phase']
elif fxpluginname == 'com.soundation.wubfilter': snd_params = ['type','cutoff','resonance','drive','lfo_type','lfo_speed','lfo_depth']
paramlist = dataset.params_list('plugin', fxpluginname)
for snd_param in snd_params:
cvpjiparam_to_sngparam(fx_plugindata, sng_fxdata, fxpluginid, snd_param, 0, True)
for snd_param in paramlist:
autodata = auto_nopl.getpoints(cvpj_l, ['plugin',fxpluginid,snd_param])
if autodata != None: autodata = cvpjauto_to_sngauto(autodata, ticksdiv)
else: autodata = []
cvpjiparam_to_sngparam(fx_plugindata, sng_fxdata, fxpluginid, snd_param, 0, autodata)
sng_fxchain.append(sng_fxdata)
@ -197,8 +210,11 @@ class output_soundation(plugin_output.base):
def getfileextension(self): return 'sng'
def parse(self, convproj_json, output_file):
global cvpj_l
global europa_vals
europa_vals = synth_nonfree_values.europa_valnames()
global ticksdiv
global dataset
dataset = data_dataset.dataset('./data_dset/soundation.dset')
dataset_synth_nonfree = data_dataset.dataset('./data_dset/synth_nonfree.dset')
cvpj_l = json.loads(convproj_json)
bpm = params.get(cvpj_l, [], 'bpm', 120)[0]
@ -342,21 +358,23 @@ class output_soundation(plugin_output.base):
plugtype = cvpj_plugindata.type_get()
a_predelay, a_attack, a_hold, a_decay, a_sustain, a_release, a_amount = cvpj_plugindata.asdr_env_get('vol')
if plugtype == ['synth-nonfree', 'Europa']:
if plugtype == ['synth-nonfree', 'europa']:
inst_supported = True
sng_instparams['identifier'] = 'com.soundation.europa'
for paramname in europa_vals:
eur_value_type, cvpj_val_name = europa_vals[paramname]
if eur_value_type == 'number':
eur_value_value = cvpj_plugindata.param_get(cvpj_val_name, 0)[0]
europaparamlist = dataset_synth_nonfree.params_list('plugin', 'europa')
for paramname in europaparamlist:
param = dataset_synth_nonfree.params_i_get('plugin', 'europa', paramname)
if not param[0]:
eur_value_value = cvpj_plugindata.param_get(paramname, 0)[0]
else:
eur_value_value = cvpj_plugindata.dataval_get(cvpj_val_name, 0)
eur_value_value = cvpj_plugindata.dataval_get(paramname, 0)
if paramname in ['Curve1','Curve2','Curve3','Curve4','Curve']:
eur_value_value = ','.join([str(x).zfill(2) for x in eur_value_value])
add_sndinstparam(sng_instparams, "/custom_properties/"+paramname, eur_value_value, True)
add_sndinstparam(sng_instparams, "/soundation/sample", None, True)
add_sndinstparam(sng_instparams, "/custom_properties/"+paramname, eur_value_value, [])
add_sndinstparam(sng_instparams, "/soundation/sample", None, [])
elif plugtype[0] == 'native-soundation':
inst_supported = True
@ -376,38 +394,38 @@ class output_soundation(plugin_output.base):
for oscnum in range(4):
for paramtype in ['detune','pitch','type','vol']:
cvpjiparam_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, 'osc_'+str(oscnum)+'_'+paramtype, 0, True)
cvpjiparam_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, 'osc_'+str(oscnum)+'_'+paramtype, 0, [])
f_predelay, f_attack, f_hold, f_decay, f_sustain, f_release, f_amount = cvpj_plugindata.asdr_env_get('cutoff')
add_sndinstparam(sng_instparams, 'filter_attack', f_attack, True)
add_sndinstparam(sng_instparams, 'filter_decay', f_decay, True)
add_sndinstparam(sng_instparams, 'filter_sustain', f_sustain, True)
add_sndinstparam(sng_instparams, 'filter_release', f_release, True)
add_sndinstparam(sng_instparams, 'filter_int', f_amount, True)
add_sndinstparam(sng_instparams, 'filter_attack', f_attack, [])
add_sndinstparam(sng_instparams, 'filter_decay', f_decay, [])
add_sndinstparam(sng_instparams, 'filter_sustain', f_sustain, [])
add_sndinstparam(sng_instparams, 'filter_release', f_release, [])
add_sndinstparam(sng_instparams, 'filter_int', f_amount, [])
f_enabled, f_cutoff, f_reso, f_type, f_subtype = cvpj_plugindata.filter_get()
add_sndinstparam(sng_instparams, 'filter_cutoff', xtramath.between_to_one(20, 7500, f_cutoff), True)
add_sndinstparam(sng_instparams, 'filter_resonance', f_reso, True)
add_sndinstparam(sng_instparams, 'filter_cutoff', xtramath.between_to_one(20, 7500, f_cutoff), [])
add_sndinstparam(sng_instparams, 'filter_resonance', f_reso, [])
for snd_param in ['noise_vol', 'noise_color']:
cvpjiparam_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, snd_param, 0, True)
cvpjiparam_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, snd_param, 0, [])
elif plugtype[1] == 'com.soundation.supersaw':
set_asdr(sng_instparams, a_attack, a_sustain, a_decay, a_release)
for snd_param in ["detune", "spread"]:
cvpjiparam_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, snd_param, 0, True)
cvpjiparam_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, snd_param, 0, [])
elif plugtype[1] == 'com.soundation.noiser':
set_asdr(sng_instparams, a_attack, a_sustain, a_decay, a_release)
elif plugtype[1] == 'com.soundation.drummachine':
for paramname in ["gain_2", "hold_1", "pitch_6", "gain_1", "decay_5", "gain_5", "hold_0", "hold_2", "pitch_7", "gain_0", "decay_6", "gain_3", "hold_5", "pitch_3", "decay_4", "pitch_4", "gain_6", "decay_7", "pitch_2", "hold_6", "decay_1", "decay_3", "decay_0", "decay_2", "gain_7", "pitch_0", "pitch_5", "hold_3", "pitch_1", "hold_4", "hold_7", "gain_4"]:
cvpjiparam_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, paramname, 0, True)
cvpjiparam_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, paramname, 0, [])
cvpjidata_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, 'kit_name', '')
elif plugtype[1] == 'com.soundation.spc':
for paramname in soundation_values.spc_vals():
cvpjiparam_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, paramname, 0, True)
cvpjiparam_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, paramname, 0, [])
for dataname in ['cuts','envelopes']:
sng_instparams[dataname] = cvpj_plugindata.dataval_get(dataname, '')
@ -417,7 +435,7 @@ class output_soundation(plugin_output.base):
elif plugtype[1] == 'com.soundation.mono': snd_params = ['filter_int','cutoff','resonance','pw','filter_decay','mix','amp_decay','glide']
elif plugtype[1] == 'com.soundation.the_wub_machine': snd_params = ['filter_cutoff','filter_drive','filter_resonance','filter_type','filth_active','filth_amount','lfo_depth','lfo_keytracking','lfo_loop','lfo_phase','lfo_retrigger','lfo_speed','lfo_type','msl_amount','osc1_gain','osc1_glide','osc1_pan','osc1_pitch','osc1_shape','osc1_type','osc2_gain','osc2_glide','osc2_pan','osc2_pitch','osc2_shape','osc2_type','osc_sub_bypass_filter','osc_sub_gain','osc_sub_glide','osc_sub_shape','osc_sub_volume_lfo','reese_active','unison_active','unison_amount','unison_count']
for snd_param in snd_params:
cvpjiparam_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, snd_param, 0, True)
cvpjiparam_to_sngparam(cvpj_plugindata, sng_instparams, pluginid, snd_param, 0, [])
elif plugtype[0] == 'midi':
inst_supported = True
@ -426,34 +444,34 @@ class output_soundation(plugin_output.base):
gm2_samplepack = idvals.get_idval(idvals_inst_gm2, str(midiinst)+'_'+str(midibank), 'url')
if gm2_samplepack == None and midibank not in [0, 128]:
gm2_samplepack = idvals.get_idval(idvals_inst_gm2, str(midiinst)+'_0', 'url')
add_sndinstparam(sng_instparams, 'sample_pack', gm2_samplepack, False)
add_sndinstparam(sng_instparams, 'attack', 0, True)
add_sndinstparam(sng_instparams, 'decay', 0, True)
add_sndinstparam(sng_instparams, 'sustain', 1, True)
add_sndinstparam(sng_instparams, 'release', 0, True)
add_sndinstparam(sng_instparams, 'sample_pack', gm2_samplepack, None)
add_sndinstparam(sng_instparams, 'attack', 0, [])
add_sndinstparam(sng_instparams, 'decay', 0, [])
add_sndinstparam(sng_instparams, 'sustain', 1, [])
add_sndinstparam(sng_instparams, 'release', 0, [])
elif plugtype[0] == 'retro':
if plugtype[1] == 'sine': gm2_samplepack = '81_8_Sine_Wave.smplpck'
if plugtype[1] == 'square': gm2_samplepack = '81_0_Square_Lead.smplpck'
if plugtype[1] == 'triangle': gm2_samplepack = '85_0_Charang.smplpck'
if plugtype[1] == 'saw': gm2_samplepack = '82_0_Saw_Wave.smplpck'
add_sndinstparam(sng_instparams, 'attack', 0, True)
add_sndinstparam(sng_instparams, 'decay', 0, True)
add_sndinstparam(sng_instparams, 'sustain', 1, True)
add_sndinstparam(sng_instparams, 'release', 0, True)
add_sndinstparam(sng_instparams, 'attack', 0, [])
add_sndinstparam(sng_instparams, 'decay', 0, [])
add_sndinstparam(sng_instparams, 'sustain', 1, [])
add_sndinstparam(sng_instparams, 'release', 0, [])
if inst_supported == False:
add_sndinstparam(sng_instparams, 'sample_pack', '2_0_Bright_Yamaha_Grand.smplpck', False)
if pluginid != None:
add_sndinstparam(sng_instparams, 'attack', a_attack, True)
add_sndinstparam(sng_instparams, 'decay', a_decay, True)
add_sndinstparam(sng_instparams, 'sustain', a_sustain, True)
add_sndinstparam(sng_instparams, 'release', a_release, True)
add_sndinstparam(sng_instparams, 'attack', a_attack, [])
add_sndinstparam(sng_instparams, 'decay', a_decay, [])
add_sndinstparam(sng_instparams, 'sustain', a_sustain, [])
add_sndinstparam(sng_instparams, 'release', a_release, [])
else:
add_sndinstparam(sng_instparams, 'attack', 0, True)
add_sndinstparam(sng_instparams, 'decay', 0, True)
add_sndinstparam(sng_instparams, 'sustain', 1, True)
add_sndinstparam(sng_instparams, 'release', 0, True)
add_sndinstparam(sng_instparams, 'attack', 0, [])
add_sndinstparam(sng_instparams, 'decay', 0, [])
add_sndinstparam(sng_instparams, 'sustain', 1, [])
add_sndinstparam(sng_instparams, 'release', 0, [])
sng_trkdata['userSetName'] = data_values.get_value(s_trackdata, 'name', '')
sng_trkdata['regions'] = []

View File

@ -11,7 +11,7 @@ from functions import note_data
from functions import params
from functions import plugins
from functions import song
from functions_plugin import waveform_values
from functions import data_dataset
from functions_tracks import tracks_r
import math
@ -161,10 +161,12 @@ def get_plugins(xml_tag, cvpj_fxids):
wf_PLUGIN.set('presetDirty', '1')
wf_PLUGIN.set('enabled', str(fx_on))
for waveform_param in waveform_params[plugtype[1]]:
defvaluevals = waveform_params[plugtype[1]][waveform_param]
paramdata = cvpj_plugindata.param_get(waveform_param, defvaluevals[1])[0]
wf_PLUGIN.set(waveform_param, str(paramdata))
paramlist = dataset.params_list('plugin', plugintype)
for paramid in paramlist:
dset_paramdata = dataset.params_i_get('plugin', plugintype, paramid)
paramdata = cvpj_plugindata.param_get(paramid, dset_paramdata[2])[0]
wf_PLUGIN.set(paramid, str(paramdata))
@ -188,15 +190,16 @@ class output_waveform_edit(plugin_output.base):
def getfileextension(self): return 'tracktionedit'
def parse(self, convproj_json, output_file):
global cvpj_l
global waveform_params
global dataset
wf_proj = ET.Element("EDIT")
wf_proj.set('appVersion', "Waveform 11.5.18")
wf_proj.set('modifiedBy', "DawVert")
waveform_params = waveform_values.devicesparam()
cvpj_l = json.loads(convproj_json)
dataset = data_dataset.dataset('./data_dset/waveform.dset')
wf_bpmdata = 120
wf_numerator = 4
wf_denominator = 4

View File

@ -21,19 +21,6 @@ class plugconv(plugin_plugconv.base):
plugin_vst2.replace_data(cvpj_plugindata, 'name','any', 'Density2', 'chunk', struct.pack('<ffff', distlevel, 0, 1, 1), None)
return True
if plugintype[1] == 'delay':
print('[plug-conv] Online Sequencer to VST2: Delay > ZamDelay:',pluginid)
plugin_vst2.replace_data(cvpj_plugindata, 'name','any', 'ZamDelay', 'param', None, 8)
cvpj_plugindata.param_add('vst_param_0', 0, 'float', "Invert")
cvpj_plugindata.param_add('vst_param_1', 0.019877, 'float', "Time")
cvpj_plugindata.param_add('vst_param_2', 1, 'float', "Sync BPM")
cvpj_plugindata.param_add('vst_param_3', 1, 'float', "LPF")
cvpj_plugindata.param_add('vst_param_4', 0.75, 'float', "Divisor")
cvpj_plugindata.param_add('vst_param_5', 1, 'float', "Output Gain")
cvpj_plugindata.param_add('vst_param_6', 0.24, 'float', "Dry/Wet")
cvpj_plugindata.param_add('vst_param_7', 0.265, 'float', "Feedback")
return True
elif plugintype[1] == 'eq':
print('[plug-conv] Online Sequencer to VST2: EQ > 3 Band EQ:',pluginid)
eq_high = cvpj_plugindata.param_get('eq_high', 0)[0]

View File

@ -9,15 +9,17 @@ class plugconv(plugin_plugconv.base):
def is_dawvert_plugin(self): return 'plugconv'
def getplugconvinfo(self): return ['native-piyopiyo', None, 'piyopiyo'], ['vst2', None, None], True, False
def convert(self, cvpj_l, pluginid, cvpj_plugindata, extra_json):
print('[plug-conv] Converting PiyoPiyo to Vital:',pluginid)
vital_data = plugin_vital.vital_data(cvpj_plugindata)
vital_data.setvalue('osc_1_on', 1)
vital_data.setvalue('osc_1_level', 0.5)
vital_data.setvalue('volume', 4000)
vital_data.setvalue_timed('env_1_release', 20)
vital_data.importcvpj_wave(cvpj_plugindata, 1, None)
#vital_data.importcvpj_env_block(cvpj_plugindata, 1, 'vol')
vital_data.importcvpj_env_points(cvpj_plugindata, 1, 'vol')
vital_data.set_modulation(1, 'lfo_1', 'osc_1_level', 1, 0, 1, 0, 0)
vital_data.to_cvpj_vst2()
return True
plugtype = cvpj_plugindata.type_get()
if plugtype == ['native-piyopiyo', 'wave']:
print('[plug-conv] Converting PiyoPiyo to Vital:',pluginid)
vital_data = plugin_vital.vital_data(cvpj_plugindata)
vital_data.setvalue('osc_1_on', 1)
vital_data.setvalue('osc_1_level', 0.5)
vital_data.setvalue('volume', 4000)
vital_data.setvalue_timed('env_1_release', 20)
vital_data.importcvpj_wave(cvpj_plugindata, 1, None)
#vital_data.importcvpj_env_block(cvpj_plugindata, 1, 'vol')
vital_data.importcvpj_env_points(cvpj_plugindata, 1, 'vol')
vital_data.set_modulation(1, 'lfo_1', 'osc_1_level', 1, 0, 1, 0, 0)
vital_data.to_cvpj_vst2()
return True