diff --git a/BlenderRemoteDebug.py b/BlenderRemoteDebug.py deleted file mode 100644 index 710a9f4..0000000 --- a/BlenderRemoteDebug.py +++ /dev/null @@ -1,11 +0,0 @@ -#script to run: -SCRIPT="/project/terry/Dev/eclipse-workspace/ABX/src/abx.py" - -#path to the PyDev folder that contains a file named pydevd.py: -PYDEVD_PATH='/home/terry/.eclipse/360744294_linux_gtk_x86_64/plugins/org.python.pydev.core_7.3.0.201908161924/pysrc/' - -#PYDEVD_PATH='/home/terry/.config/blender/2.79/scripts/addons/modules/pydev_debug.py' - -import pydev_debug as pydev #pydev_debug.py is in a folder from Blender PYTHONPATH - -pydev.debug(SCRIPT, PYDEVD_PATH, trace = True) diff --git a/DebugInBlender.py b/DebugInBlender.py index 0a65e41..83628f3 100644 --- a/DebugInBlender.py +++ b/DebugInBlender.py @@ -1,4 +1,4 @@ #!/usr/bin/env python # Run the script in the debugger client within Blender: import subprocess -subprocess.call(['blender', '-P', '/project/terry/Dev/eclipse-workspace/ABX/BlenderRemoteDebug.py']) +subprocess.call(['blender279', '-P', '/project/terry/Dev/Git/abx/scripts/BlenderRemoteDebug.py']) diff --git a/TestInBlender.py b/TestInBlender.py new file mode 100644 index 0000000..1af9ca7 --- /dev/null +++ b/TestInBlender.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python +# Inject the unittest runner script into Blender and run it in batch mode: +import subprocess +subprocess.call(['blender279', '-b', '-P', '/project/terry/Dev/Git/abx/scripts/TestInBlender_bpy.py']) diff --git a/abx/__init__.py b/abx/__init__.py index d8fc801..fee1767 100644 --- a/abx/__init__.py +++ b/abx/__init__.py @@ -2,7 +2,7 @@ bl_info = { "name": "ABX", "author": "Terry Hancock / Lunatics.TV Project / Anansi Spaceworks", - "version": (0, 2, 5), + "version": (0, 2, 6), "blender": (2, 79, 0), "location": "SpaceBar Search -> ABX", "description": "Anansi Studio Extensions for Blender", @@ -22,17 +22,20 @@ try: except ImportError: print("Blender Add-On 'ABX' requires the Blender Python environment to run.") 
-print("blender_present = ", blender_present) - -if blender_present: +if blender_present: from . import abx_ui - + + BlendFile = abx_ui.BlendFile + def register(): - bpy.utils.register_module(__name__) - + abx_ui.register() + #bpy.utils.register_module(__name__) + def unregister(): - bpy.utils.unregister_module(__name__) + abx_ui.unregister() + #bpy.utils.unregister_module(__name__) if __name__ == "__main__": register() + \ No newline at end of file diff --git a/abx/abx.yaml b/abx/abx.yaml index eebaa22..364a862 100644 --- a/abx/abx.yaml +++ b/abx/abx.yaml @@ -1,72 +1,113 @@ # DEFAULT ABX SETTINGS --- -project_schema: - - rank: project - delimiter: '-' - words: True - type: string - - - rank: sequence - type: - VN: Vague Name - - - rank: shot - type: letter - maxlength: 1 - - - rank: element - type: string - maxlength: 2 +abx_default: True -render_profiles: - previz: - name: PreViz, - desc: 'GL/AVI Previz Render for Animatics', - engine: gl - version: any - fps: 30 - fps_div: 1000 - fps_skip: 1 - suffix: GL - format: AVI_JPEG - extension: avi - freestyle: False +project_unit: [] + +project_schema: [] + +definitions: + filetypes: + blend: "Blender File" + kdenlive: "Kdenlive Video Editor File" + mlt: "Kdenlive Video Mix Script" + svg: "Scalable Vector Graphics (Inkscape)" + kra: "Krita Graphic File" + xcf: "Gimp Graphic File" + png: "Portable Network Graphics (PNG) Image" + jpg: "Joint Photographic Experts Group (JPEG) Image" + aup: "Audacity Project" + ardour: "Ardour Project" + flac: "Free Lossless Audio Codec (FLAC)" + mp3: "MPEG Audio Layer III (MP3) Audio File" + ogg: "Ogg Vorbis Audio File" + avi: "Audio Video Interleave (AVI) Video Container" + mkv: "Matroska Video Container" + mp4: "Moving Picture Experts Group (MPEG) 4 Format" + txt: "Plain Text File" - quick: - name: 30fps Paint - desc: '30fps Simplified Paint-Only Render' - engine: bi - fps: 30 - fps_skip: 3 - suffix: PT - format: AVI_JPEG - extension: avi - freestyle: False, - antialias: False, - 
motionblur: False + roles: + extras: "Extras, crowds, auxillary animated movement" + mech: "Mechanical animation" + anim: "Character animation" + cam: "Camera direction" + vfx: "Visual special effects" + compos: "Compositing" + bkg: "Background 2D image" + bb: "Billboard 2D image" + tex: "Texture 2D image" + foley: "Foley sound" + voice: "Voice recording" + fx: "Sound effects" + music: "Music track" + cue: "Musical cue" + amb: "Ambient sound" + loop: "Ambient sound loop" + edit: "Video edit" + + roles_by_filetype: + kdenlive: edit + mlt: edit + + omit_ranks: # Controls how much we shorten names + edit: 0 + render: 0 + filename: 0 + scene: 0 + +abx: + render_profiles: + previz: + name: PreViz, + desc: 'GL/AVI Previz Render for Animatics' + engine: gl + version: any + fps: 30 + fps_div: 1000 + fps_skip: 1 + suffix: GL + format: AVI_JPEG + extension: avi + freestyle: False - check: - name: 1fps Check - desc: '1fps Full-Features Check Renders' - engine: bi - fps: 30 - fps_skip: 30 - suffix: CH - format: JPEG - extension: jpg - framedigits: 5 - freestyle: True - antialias: 8 - - full: - name: 30fps Full - desc: 'Full Render with all Features Turned On', - engine: bi - fps: 30 - fps_skip: 1 - suffix: '' - format: PNG - extension: png - framedigits: 5 - freestyle: True - antialias: 8 + quick: + name: 30fps Paint + desc: '30fps Simplified Paint-Only Render' + engine: bi + fps: 30 + fps_skip: 3 + suffix: PT + format: AVI_JPEG + extension: avi + freestyle: False, + antialias: False, + motionblur: False + + check: + name: 1fps Check + desc: '1fps Full-Features Check Renders' + engine: bi + fps: 30 + fps_skip: 30 + suffix: CH + format: JPEG + extension: jpg + framedigits: 5 + freestyle: True + antialias: 8 + + full: + name: 30fps Full + desc: 'Full Render with all Features Turned On' + engine: bi + fps: 30 + fps_skip: 1 + suffix: '' + format: PNG + extension: png + framedigits: 5 + freestyle: True + antialias: 8 + motionblur: 2 + rendersize: 100 + compress: 50 diff --git 
a/abx/abx_ui.py b/abx/abx_ui.py index 60d24a5..954b2ea 100644 --- a/abx/abx_ui.py +++ b/abx/abx_ui.py @@ -23,14 +23,22 @@ run into. # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # - - import os import bpy, bpy.utils, bpy.types, bpy.props +from bpy.app.handlers import persistent + +from . import file_context + +# if bpy.data.filepath: +# BlendfileContext = file_context.FileContext(bpy.data.filepath) +# else: +# BlendfileContext = file_context.FileContext() +# +# abx_data = BlendfileContext.abx_data from . import copy_anim -from . import std_lunatics_ink +from abx import ink_paint from . import render_profile @@ -273,8 +281,7 @@ class LunaticsSceneProperties(bpy.types.PropertyGroup): maxlen=0 ) -bpy.utils.register_class(LunaticsSceneProperties) -bpy.types.Scene.lunaprops = bpy.props.PointerProperty(type=LunaticsSceneProperties) + class LunaticsScenePanel(bpy.types.Panel): """ @@ -315,9 +322,7 @@ class RenderProfileSettings(bpy.types.PropertyGroup): description="Select from pre-defined profiles of render settings", default='full') -bpy.utils.register_class(RenderProfileSettings) -bpy.types.Scene.render_profile_settings = bpy.props.PointerProperty( - type=RenderProfileSettings) + class RenderProfilesOperator(bpy.types.Operator): """ @@ -335,6 +340,7 @@ class RenderProfilesOperator(bpy.types.Operator): return {'FINISHED'} + class RenderProfilesPanel(bpy.types.Panel): """ Add simple drop-down selector for generating common render settings with @@ -354,6 +360,7 @@ class RenderProfilesPanel(bpy.types.Panel): row.operator('render.render_profiles') + class copy_animation(bpy.types.Operator): """ Copy animation from active object to selected objects (select source last!). @@ -399,6 +406,8 @@ class copy_animation(bpy.types.Operator): return {'FINISHED'} + + class copy_animation_settings(bpy.types.PropertyGroup): """ Settings for the 'copy_animation' operator. 
@@ -423,8 +432,7 @@ class copy_animation_settings(bpy.types.PropertyGroup): description = "Scale factor for scaling animation (Re-Scale w/ 1.0 copies actions)", default = 1.0) -bpy.utils.register_class(copy_animation_settings) -bpy.types.Scene.copy_anim_settings = bpy.props.PointerProperty(type=copy_animation_settings) + class CharacterPanel(bpy.types.Panel): bl_space_type = "VIEW_3D" # window type panel is displayed in @@ -443,7 +451,7 @@ class CharacterPanel(bpy.types.Panel): layout.prop(settings, 'rescale') layout.prop(settings, 'scale_factor') - + class lunatics_compositing_settings(bpy.types.PropertyGroup): @@ -465,9 +473,7 @@ class lunatics_compositing_settings(bpy.types.PropertyGroup): description = "Render sky separately with compositing support (better shadows)", default = True) -bpy.utils.register_class(lunatics_compositing_settings) -bpy.types.Scene.lx_compos_settings = bpy.props.PointerProperty(type=lunatics_compositing_settings) - + class lunatics_compositing(bpy.types.Operator): """ Set up standard Lunatics scene compositing. 
@@ -483,7 +489,7 @@ class lunatics_compositing(bpy.types.Operator): """ scene = context.scene - shot = std_lunatics_ink.LunaticsShot(scene, + shot = ink_paint.LunaticsShot(scene, inkthru=context.scene.lx_compos_settings.inkthru, billboards=context.scene.lx_compos_settings.billboards, sepsky=context.scene.lx_compos_settings.sepsky ) @@ -497,7 +503,7 @@ class lunatics_compositing(bpy.types.Operator): # self.col = self.layout.col() # col.prop(settings, "inkthru", text="Ink Thru") # col.prop(settings, "billboards", text="Ink Thru") - + class LunaticsPanel(bpy.types.Panel): @@ -515,16 +521,52 @@ class LunaticsPanel(bpy.types.Panel): layout.prop(settings, 'inkthru', text="Ink-Thru") layout.prop(settings, 'billboards', text="Billboards") layout.prop(settings, 'sepsky', text="Separate Sky") - + + +BlendFile = file_context.FileContext() + +@persistent +def update_handler(ctxt): + BlendFile.update(bpy.data.filepath) + def register(): - bpy.utils.register_module(__name__) + bpy.utils.register_class(LunaticsSceneProperties) + bpy.types.Scene.lunaprops = bpy.props.PointerProperty(type=LunaticsSceneProperties) + bpy.utils.register_class(LunaticsScenePanel) + + bpy.utils.register_class(RenderProfileSettings) + bpy.types.Scene.render_profile_settings = bpy.props.PointerProperty( + type=RenderProfileSettings) + bpy.utils.register_class(RenderProfilesOperator) + bpy.utils.register_class(RenderProfilesPanel) + + bpy.utils.register_class(copy_animation) + bpy.utils.register_class(copy_animation_settings) + bpy.types.Scene.copy_anim_settings = bpy.props.PointerProperty(type=copy_animation_settings) + bpy.utils.register_class(CharacterPanel) + + bpy.utils.register_class(lunatics_compositing_settings) + bpy.types.Scene.lx_compos_settings = bpy.props.PointerProperty(type=lunatics_compositing_settings) + bpy.utils.register_class(lunatics_compositing) + bpy.utils.register_class(LunaticsPanel) + + bpy.app.handlers.save_post.append(update_handler) + 
bpy.app.handlers.load_post.append(update_handler) + bpy.app.handlers.scene_update_post.append(update_handler) def unregister(): - bpy.utils.unregister_module(__name__) + bpy.utils.unregister_class(LunaticsSceneProperties) + bpy.utils.unregister_class(LunaticsScenePanel) -if __name__ == "__main__": - register() - - - + bpy.utils.unregister_class(RenderProfileSettings) + bpy.utils.unregister_class(RenderProfilesOperator) + bpy.utils.unregister_class(RenderProfilesPanel) + + bpy.utils.unregister_class(copy_animation) + bpy.utils.unregister_class(copy_animation_settings) + bpy.utils.unregister_class(CharacterPanel) + + bpy.utils.unregister_class(lunatics_compositing_settings) + bpy.utils.unregister_class(lunatics_compositing) + bpy.utils.unregister_class(LunaticsPanel) diff --git a/abx/accumulate.py b/abx/accumulate.py index 553bef5..b592be3 100644 --- a/abx/accumulate.py +++ b/abx/accumulate.py @@ -244,6 +244,10 @@ class RecursiveDict(collections.OrderedDict): #-------- # Code for collecting the YAML files we need +ABX_YAML = os.path.join(os.path.dirname( + os.path.abspath(os.path.join(__file__))), + 'abx.yaml') + def collect_yaml_files(path, stems, dirmatch=False, sidecar=False, root='/'): """ @@ -327,8 +331,10 @@ def get_project_data(filepath): kitcat_root = get_project_root(kitcat_paths) - abx_data = combine_yaml(collect_yaml_files(filepath, - 'abx', root=kitcat_root)) + abx_data = combine_yaml([ABX_YAML])['abx'] + + abx_data.update(combine_yaml(collect_yaml_files(filepath, + 'abx', root=kitcat_root))) return kitcat_root, kitcat_data, abx_data diff --git a/abx/file_context.py b/abx/file_context.py index 10769f2..4dec9b9 100644 --- a/abx/file_context.py +++ b/abx/file_context.py @@ -58,6 +58,11 @@ Demo: import os, re, copy, string, collections import yaml +DEFAULT_YAML = {} +with open(os.path.join(os.path.dirname(__file__), 'abx.yaml')) as def_yaml_file: + DEFAULT_YAML.update(yaml.safe_load(def_yaml_file)) + + TESTPATH = 
os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'testdata', 'myproject', 'Episodes', 'A.001-Pilot', 'Seq', 'LP-LastPoint', 'A.001-LP-1-BeginningOfEnd-anim.txt')) from . import accumulate @@ -66,6 +71,53 @@ from .accumulate import RecursiveDict wordre = re.compile(r'([A-Z][a-z]+|[a-z]+|[0-9]+|[A-Z][A-Z]+)') +class Enum(dict): + def __init__(self, *options): + for i, option in enumerate(options): + if isinstance(option, list) or isinstance(option, tuple): + name = option[0] + self[i] = tuple(option) + else: + name = str(option) + self[i] = (option, option, option) + self[name] = i + if name not in ('name', 'number', 'options'): + setattr(self, name, i) + + @property + def options(self): + """ + This gives the options in a Blender-friendly format, with + tuples of three strings for initializing bpy.props.Enum(). + + If the Enum was initialized with strings, the options will + contain the same string three times. If initialized with + tuples of strings, they will be used unaltered. 
+ """ + options = [] + number_keys = sorted([k for k in self.keys() if type(k) is int]) + return [self[i] for i in number_keys] + + def name(self, n): + if type(n) is int: + return self[n][0] + elif type(n) is str: + return n + else: + return None + + def number(self, n): + if type(n) is str: + return self[n] + elif type(n) is int: + return n + else: + return None + + +log_level = Enum('DEBUG', 'INFO', 'WARNING', 'ERROR') + + NameParsers = {} # Parser registry def registered_parser(parser): @@ -352,9 +404,12 @@ class Parser_ABX_Schema(object): start = 0 for start, (schema, name) in enumerate(zip(self.schemas, namepath)): field, r, s = self._parse_beginning(remainder, schema.delimiter) - if field.lower() == schema.format.format(name).lower(): - score += 1.0 - break + try: + if field.lower() == schema.format.format(name).lower(): + score += 1.0 + break + except ValueError: + print(' (365) field, format', field, schema.format) possible += 1.0 @@ -366,11 +421,14 @@ class Parser_ABX_Schema(object): if not remainder: break field, remainder, s = self._parse_beginning(remainder, schema.delimiter) score += s - if ( type(field) == str and - field.lower() == schema.format.format(name).lower()): - fields[schema.rank]={'code':field} - fields['rank'] = schema.rank - score += 1.0 + try: + if ( type(field) == str and + field.lower() == schema.format.format(name).lower()): + fields[schema.rank]={'code':field} + fields['rank'] = schema.rank + score += 1.0 + except ValueError: + print(' (384) field, format', field, schema.format) possible += 2.0 # Remaining fields are authoritative (doesn't affect score) @@ -387,6 +445,77 @@ class Parser_ABX_Schema(object): fields['role'] = self.roles_by_filetype[fields['filetype']] return score/possible, fields + +@registered_parser +class Parser_ABX_Fallback(object): + """ + Highly-tolerant parser to fall back to if the others fail + or can't be used. 
+ """ + name = 'abx_fallback' + + filetypes = DEFAULT_YAML['definitions']['filetypes'] + roles = DEFAULT_YAML['definitions']['roles'] + roles_by_filetype = ( + DEFAULT_YAML['definitions']['roles_by_filetype']) + + main_sep_re = re.compile(r'\W+') # Any single non-word char + comment_sep_re = re.compile(r'[\W_][\W_]+|[~#$!=+&]+') + + + def __init__(self, **kwargs): + pass + + def _parse_ending(self, filename, separator): + try: + remainder, suffix = filename.rsplit(separator, 1) + score = 1.0 + except ValueError: + remainder = filename + suffix = None + score = 0.0 + return (suffix, remainder, score) + + def __call__(self, filename, namepath): + fields = {} + score = 1.0 + possible = 4.5 + + split = filename.rsplit('.', 1) + if len(split)<2 or split[1] not in self.filetypes: + fields['filetype'] = None + remainder = filename + score += 1.0 + else: + fields['filetype'] = split[1] + remainder = split[0] + + comment_match = self.comment_sep_re.search(remainder) + if comment_match: + fields['comment'] = remainder[comment_match.end():] + remainder = remainder[:comment_match.start()] + else: + fields['comment'] = None + + role = self.main_sep_re.split(remainder)[-1] + if role in self.roles: + fields['role'] = role + remainder = remainder[:-1-len(role)] + score += 1.0 + else: + fields['role'] = None + + # Implied role + if fields['filetype'] in self.roles_by_filetype: + fields['role'] = self.roles_by_filetype[fields['filetype']] + score += 1.0 + + words = self.main_sep_re.split(remainder) + fields['code'] = ''.join([w.capitalize() for w in words]) + fields['title'] = remainder + + return score/possible, fields + class RankNotFound(LookupError): @@ -574,8 +703,19 @@ class NameContext(object): """ def __init__(self, container, fields=None, namepath_segment=(), ): - self.schemas = [] + self.clear() + if container or fields or namepath_segment: + self.update(container, fields, namepath_segment) + + def clear(self): self.fields = {} + self.schemas = ['project'] + self.rank = 0 
+ self.code = 'untitled' + self.container = None + self.namepath_segment = [] + + def update(self, container=None, fields=None, namepath_segment=()): self.container = container if namepath_segment: @@ -584,12 +724,9 @@ class NameContext(object): self.namepath_segment = [] try: - #self.namepath = self.container.namepath self.schemas = self.container.schemas - except AttributeError: self.schemas = [] - #self.namepath = [] try: self.omit_ranks = self.container.omit_ranks @@ -606,12 +743,6 @@ class NameContext(object): self.fields.update(fields) elif isinstance(fields, str): self.fields.update(yaml.safe_load(fields)) - -# if 'code' in self.fields: -# self.namepath.append(self.fields['code']) - - #self.code = self.fields[self.rank]['code'] - def update_fields(self, data): self.fields.update(data) @@ -684,7 +815,7 @@ class NameContext(object): return None @rank.setter - def set_rank(self, rank): + def rank(self, rank): self.fields['rank'] = rank @property @@ -699,7 +830,7 @@ class NameContext(object): return '' @name.setter - def set_name(self, name): + def name(self, name): self.fields['name'] = name @property @@ -710,7 +841,7 @@ class NameContext(object): return self.fields['code'] @code.setter - def code_setter(self, code): + def code(self, code): if self.rank: self.fields[self.rank] = {'code': code} else: @@ -724,7 +855,7 @@ class NameContext(object): return '' @description.setter - def set_description(self, description): + def description(self, description): self.fields['description'] = str(description) def _get_name_components(self): @@ -797,6 +928,7 @@ class FileContext(NameContext): # hierarchy = None #schema = None + # IMMUTABLE DEFAULTS: filepath = None root = None folders = () @@ -820,97 +952,151 @@ class FileContext(NameContext): Collect path context information from a given filepath. (Searches the filesystem for context information). 
""" - #self.clear() - self.notes = [] - - # First init the superclass NameContext NameContext.__init__(self, None, {}) - - self.namepath_segment = [] - - # TODO: - # I need to specify what happens when the path isn't defined. - # (Like we might need to initialize later?) - + self.clear() + self.clear_notes() if path: self.update(path) + + def clear(self): + NameContext.clear(self) + + # Identity + self.root = os.path.abspath(os.environ['HOME']) + self.render_root = os.path.join(self.root, 'Renders') + self.filetype = '' + self.role = '' + self.title = '' + self.comment = '' + # Containers + #self.notes = [] + self.name_contexts = [] + + # Status / Settings + self.filepath = None + self.filename = None + self.file_exists = False + self.folder_exists = False + self.omit_ranks = { + 'edit': 0, + 'render': 0, + 'filename': 0, + 'scene': 0} + + # Defaults + self.provided_data = RecursiveDict(DEFAULT_YAML) + self.abx_fields = DEFAULT_YAML['abx'] + + def clear_notes(self): + # We use this for logging, so it doesn't get cleared by the + # normal clear process. + self.notes = [] def update(self, path): - # Basic File Path Info - self.filepath = os.path.abspath(path) - self.filename = os.path.basename(path) + # Basic File Path Info + self.filepath = os.path.abspath(path) + self.filename = os.path.basename(path) + + # Does the file path exist? + if os.path.exists(path): + self.file_exists = True + self.folder_exists = True + else: + self.file_exists = False + if os.path.exists(os.path.dirname(path)): + self.folder_exists = True + else: + self.folder_exists = False - # Does the file path exist? - # - Should we create it? / Are we creating it? - - # We should add a default YAML file in the ABX software to guarantee - # necessary fields are in place, and to document the configuration for - # project developers. - - # Data from YAML Files - self._collect_yaml_data() - - # Did we find the YAML data for the project? - # Did we find the project root? - - # TODO: Bug? 
- # Note that 'project_schema' might not be correct if overrides are given. - # As things are, I think it will simply append the overrides, and this - # may lead to odd results. We'd need to actively compress the list by - # overwriting according to rank - # + # - Should we create it? / Are we creating it? + + # We should add a default YAML file in the ABX software to guarantee + # necessary fields are in place, and to document the configuration for + # project developers. + + # Data from YAML Files + #self._collect_yaml_data() + self.provided_data = RecursiveDict(DEFAULT_YAML) + + kitcat_root, kitcat_data, abx_data = accumulate.get_project_data(self.filepath) + self.root = kitcat_root + self.provided_data.update(kitcat_data) + path = os.path.abspath(os.path.normpath(self.filepath)) + root = os.path.abspath(self.root) + self.folders = [os.path.basename(self.root)] + self.folders.extend(os.path.normpath(os.path.relpath(path, root)).split(os.sep)[:-1]) + + self.abx_fields = abx_data + # Did we find the YAML data for the project? + # Did we find the project root? + + # TODO: Bug? + # Note that 'project_schema' might not be correct if overrides are given. + # As things are, I think it will simply append the overrides, and this + # may lead to odd results. We'd need to actively compress the list by + # overwriting according to rank + # + try: self._load_schemas(self.provided_data['project_schema']) self.namepath_segment = [d['code'] for d in self.provided_data['project_unit']] self.code = self.namepath[-1] - - # Was there a "project_schema" section? - # - if not, do we fall back to a default? - - # Was there a "project_unit" section? - # - if not, can we construct what we need from project_root & folders? - - # Is there a definitions section? - # Do we provide defaults? 
- - try: - self.render_root = os.path.join(self.root, - self.provided_data['definitions']['render_root']) - except KeyError: - self.render_root = os.path.join(self.root, 'Renders') - - self.omit_ranks = {} - try: - for key, val in self.provided_data['definitions']['omit_ranks'].items(): - self.omit_ranks[key] = int(val) - except KeyError: - self.omit_ranks.update({ - 'edit': 0, - 'render': 1, - 'filename': 1, - 'scene': 3}) - - # Data from Parsing the File Name - try: - self.parsers = [NameParsers[self.provided_data['definitions']['parser']](**self.schema['filenames'])] - except (TypeError, KeyError, IndexError): - self.parsers = [ - #Parser_ABX_Episode(), - Parser_ABX_Schema(self.schemas, self.provided_data['definitions'])] + except: + print("Errors finding Name Path (is there a 'project_schema' or 'project_unit' defined?") + pass + # print("\n(899) filename = ", self.filename) + # if 'project_schema' in self.provided_data: + # print("(899) project_schema: ", self.provided_data['project_schema']) + # else: + # print("(899) project schema NOT DEFINED") + # + # print("(904) self.namepath_segment = ", self.namepath_segment) - self.parser_chosen, self.parser_score = self._parse_filename() + + # Was there a "project_schema" section? + # - if not, do we fall back to a default? + + # Was there a "project_unit" section? + # - if not, can we construct what we need from project_root & folders? + + # Is there a definitions section? + # Do we provide defaults? 
+ + try: + self.render_root = os.path.join(self.root, + self.provided_data['definitions']['render_root']) + except KeyError: + self.render_root = os.path.join(self.root, 'Renders') + + self.omit_ranks = {} + try: + for key, val in self.provided_data['definitions']['omit_ranks'].items(): + self.omit_ranks[key] = int(val) + except KeyError: + self.omit_ranks.update({ + 'edit': 0, + 'render': 1, + 'filename': 1, + 'scene': 3}) + + # Data from Parsing the File Name + try: + self.parsers = [NameParsers[self.provided_data['definitions']['parser']](**self.schema['filenames'])] + except (TypeError, KeyError, IndexError): + self.parsers = [ + #Parser_ABX_Episode(), + Parser_ABX_Schema(self.schemas, self.provided_data['definitions'])] - self.filetype = self.fields['filetype'] - self.role = self.fields['role'] - self.title = self.fields['title'] - self.comment = self.fields['comment'] - - # TODO: - # We don't currently consider the information from the folder names, - # though we could get some additional information this way - - # Empty / default attributes - self.name_contexts = [] + parser_chosen, parser_score = self._parse_filename() + self.log(log_level.INFO, "Parsed with %s, score: %d" % + (parser_chosen, parser_score)) + + + + # TODO: + # We don't currently consider the information from the folder names, + # though we could get some additional information this way + def __repr__(self): @@ -919,7 +1105,18 @@ class FileContext(NameContext): s = s + str(self.code) + '(' + str(self.rank) + ')' s = s + ')' return s - + + def log(self, level, msg): + if type(level) is str: + level = log_level.index(level) + self.notes.append((level, msg)) + + def get_log_text(self, level=log_level.INFO): + level = log_level.number(level) + return '\n'.join([ + ': '.join((log_level.name(note[0]), note[1])) + for note in self.notes + if log_level.number(note[0]) >= level]) def _parse_filename(self): """ @@ -928,6 +1125,7 @@ class FileContext(NameContext): """ fields = {} best_score = 0.0 + 
best_parser_name = None for parser in self.parsers: score, fielddata = parser(self.filename, self.namepath) if score > best_score: @@ -946,18 +1144,51 @@ class FileContext(NameContext): self.fields[key] = val - def _collect_yaml_data(self): - self.provided_data = RecursiveDict() - kitcat_root, kitcat_data, abx_data = accumulate.get_project_data(self.filepath) - self.root = kitcat_root - self.provided_data.update(kitcat_data) - path = os.path.abspath(os.path.normpath(self.filepath)) - root = os.path.abspath(self.root) - self.folders = [os.path.basename(self.root)] - self.folders.extend(os.path.normpath(os.path.relpath(path, root)).split(os.sep)[:-1]) - - self.abx_fields = abx_data +# def _collect_yaml_data(self): + @property + def filetype(self): + if 'filetype' in self.fields: + return self.fields['filetype'] + else: + return '' + + @filetype.setter + def filetype(self, filetype): + self.fields['filetype'] = filetype + + @property + def role(self): + if 'role' in self.fields: + return self.fields['role'] + else: + return '' + + @role.setter + def role(self, role): + self.fields['role'] = role + + @property + def title(self): + if 'title' in self.fields: + return self.fields['title'] + else: + return '' + + @title.setter + def title(self, title): + self.fields['title'] = title + + @property + def comment(self): + if 'comment' in self.fields: + return self.fields['comment'] + else: + return '' + + @comment.setter + def comment(self, comment): + self.fields['comment'] = comment @classmethod def deref_implications(cls, values, matchfields): @@ -993,12 +1224,9 @@ class FileContext(NameContext): fields = {} fields.update(self.fields) - namepath_segment = [] - - ranks = [s.rank for s in self.schemas] - - i_rank = len(self.namepath) - + namepath_segment = [] + ranks = [s.rank for s in self.schemas] + i_rank = len(self.namepath) old_rank = ranks[i_rank -1] # The new rank will be the highest rank mentioned, or the diff --git a/abx/std_lunatics_ink.py b/abx/ink_paint.py 
similarity index 100% rename from abx/std_lunatics_ink.py rename to abx/ink_paint.py diff --git a/abx/render_profile.py b/abx/render_profile.py index a2665df..17ce542 100644 --- a/abx/render_profile.py +++ b/abx/render_profile.py @@ -3,81 +3,201 @@ Blender Python code to set parameters based on render profiles. """ +import bpy import bpy, bpy.types, bpy.utils, bpy.props -from . import std_lunatics_ink +from abx import ink_paint -render_formats = { - # VERY simplified and limited list of formats from Blender that we need: - # : (, ), - 'PNG': ('PNG', 'png'), - 'JPG': ('JPEG', 'jpg'), - 'EXR': ('OPEN_EXR_MULTILAYER', 'exr'), - 'AVI': ('AVI_JPEG', 'avi'), - 'MKV': ('FFMPEG', 'mkv') - } +from . import file_context -def set_render_from_profile(scene, profile): - if 'engine' in profile: - if profile['engine'] == 'gl': - pass - elif profile['engine'] == 'bi': - scene.render.engine = 'BLENDER_RENDER' - elif profile['engine'] == 'cycles': - scene.render.engine = 'CYCLES' - elif profile['engine'] == 'bge': - scene.render.engine = 'BLENDER_GAME' +class RenderProfile(object): + render_formats = { + # VERY simplified and limited list of formats from Blender that we need: + # : (, ), + 'PNG': ('PNG', 'png'), + 'JPG': ('JPEG', 'jpg'), + 'EXR': ('OPEN_EXR_MULTILAYER', 'exr'), + 'AVI': ('AVI_JPEG', 'avi'), + 'MKV': ('FFMPEG', 'mkv') + } + + engines = { + 'bi': 'BLENDER_RENDER', + 'BLENDER_RENDER': 'BLENDER_RENDER', + 'BI': 'BLENDER_RENDER', + + 'cycles': 'CYCLES', + 'CYCLES': 'CYCLES', + + 'bge': 'BLENDER_GAME', + 'BLENDER_GAME': 'BLENDER_GAME', + 'BGE': 'BLENDER_GAME', + + 'gl': None, + 'GL': None + } + + + def __init__(self, fields): + + # Note: Settings w/ value *None* are left unaltered + # That is, they remain whatever they were before + # If a setting isn't included in the fields, then + # the attribute will be *None*. 
+ + if 'engine' not in fields: + fields['engine'] = None - if 'fps' in profile: - scene.render.fps = profile['fps'] - - if 'fps_skip' in profile: - scene.frame_step = profile['fps_skip'] - - if 'format' in profile: - scene.render.image_settings.file_format = render_formats[profile['format']][0] - - if 'freestyle' in profile: - scene.render.use_freestyle = profile['freestyle'] - - if 'antialias' in profile: - if profile['antialias']: - scene.render.use_antialiasing = True - if profile['antialias'] in (5,8,11,16): - scene.render.antialiasing_samples = str(profile['antialias']) + if fields['engine']=='gl': + self.viewport_render = True + self.engine = None else: - scene.render.use_antialiasing = False - - if 'motionblur' in profile: - if profile['motionblur']: - scene.render.use_motion_blur = True - if type(profile['motionblur'])==int: - scene.render.motion_blur_samples = profile['motionblur'] + self.viewport_render = False + + if fields['engine'] in self.engines: + self.engine = self.engines[fields['engine']] else: - scene.render.use_motion_blur = False - - # Use Lunatics naming scheme for render target: - if 'framedigits' in profile: - framedigits = profile['framedigits'] - else: - framedigits = 5 + self.engine = None + + # Parameters which are stored as-is, without modification: + self.fps = 'fps' in fields and int(fields['fps']) or None + self.fps_skip = 'fps_skip' in fields and int(fields['fps_skip']) or None + self.fps_divisor = 'fps_divisor' in fields and float(fields['fps_divisor']) or None + self.rendersize = 'rendersize' in fields and int(fields['rendersize']) or None + self.compress = 'compress' in fields and int(fields['compress']) or None - if 'suffix' in profile: - suffix = profile['suffix'] - else: - suffix = '' + self.format = 'format' in fields and str(fields['format']) or None - if 'format' in profile: - rdr_fmt = render_formats[profile['format']][0] - ext = render_formats[profile['format']][1] - else: - rdr_fmt = 'PNG' - ext = 'png' - - path = 
std_lunatics_ink.LunaticsShot(scene).render_path( - suffix=suffix, framedigits=framedigits, ext=ext, rdr_fmt=rdr_fmt) - - scene.render.filepath = path + self.freestyle = 'freestyle' in fields and bool(fields['freestyle']) or None + + self.antialiasing_samples = None + self.use_antialiasing = None + if 'antialias' in fields: + if fields['antialias']: + self.use_antialiasing = True + if fields['antialias'] in (5,8,11,16): + self.antialiasing_samples = str(fields['antialias']) + else: + self.use_antialiasing = False + + self.use_motion_blur = None + self.motion_blur_samples = None + if 'motionblur' in fields: + if fields['motionblur']: + self.use_motion_blur = True + if type(fields['motionblur'])==int: + self.motion_blur_samples = int(fields['motionblur']) + else: + self.use_motion_blur = False + + if 'framedigits' in fields: + self.framedigits = fields['framedigits'] + else: + self.framedigits = 5 + + if 'suffix' in fields: + self.suffix = fields['suffix'] + else: + self.suffix = '' + + def apply(self, scene): + """ + Apply the profile settings to the given scene. 
+ """ + if self.engine: scene.render.engine = self.engine + if self.fps: scene.render.fps = self.fps + if self.fps_skip: scene.frame_step = self.fps_skip + if self.fps_divisor: scene.render.fps_base = self.fps_divisor + if self.rendersize: scene.render.resolution_percentage = self.rendersize + if self.compress: scene.render.image_settings.compression = self.compress + + if self.format: + scene.render.image_settings.file_format = self.render_formats[self.format][0] + + if self.freestyle: scene.render.use_freestyle = self.freestyle + if self.use_antialiasing: + scene.render.use_antialiasing = self.use_antialiasing + + if self.antialiasing_samples: + scene.render.antialiasing_samples = self.antialiasing_samples + if self.use_motion_blur: + scene.render.use_motion_blur = self.use_motion_blur + + if self.motion_blur_samples: + scene.render.motion_blur_samples = self.motion_blur_samples + + if self.format: + # prefix = scene.name_context.render_path + # prefix = BlendfileContext.name_contexts[scene.name_context].render_path + prefix = 'path_to_render' # We actually need to get this from NameContext + if self.suffix: + scene.render.filepath = (prefix + '-' + self.suffix + '-' + + 'f'+('#'*self.framedigits) + '.' 
+ + self.render_formats[self.format][1]) + + + +# def set_render_from_profile(scene, profile): +# if 'engine' in profile: +# if profile['engine'] == 'gl': +# pass +# elif profile['engine'] == 'bi': +# scene.render.engine = 'BLENDER_RENDER' +# elif profile['engine'] == 'cycles': +# scene.render.engine = 'CYCLES' +# elif profile['engine'] == 'bge': +# scene.render.engine = 'BLENDER_GAME' +# +# if 'fps' in profile: +# scene.render.fps = profile['fps'] +# +# if 'fps_skip' in profile: +# scene.frame_step = profile['fps_skip'] +# +# if 'format' in profile: +# scene.render.image_settings.file_format = render_formats[profile['format']][0] +# +# if 'freestyle' in profile: +# scene.render.use_freestyle = profile['freestyle'] +# +# if 'antialias' in profile: +# if profile['antialias']: +# scene.render.use_antialiasing = True +# if profile['antialias'] in (5,8,11,16): +# scene.render.antialiasing_samples = str(profile['antialias']) +# else: +# scene.render.use_antialiasing = False +# +# if 'motionblur' in profile: +# if profile['motionblur']: +# scene.render.use_motion_blur = True +# if type(profile['motionblur'])==int: +# scene.render.motion_blur_samples = profile['motionblur'] +# else: +# scene.render.use_motion_blur = False +# +# # Use Lunatics naming scheme for render target: +# if 'framedigits' in profile: +# framedigits = profile['framedigits'] +# else: +# framedigits = 5 +# +# if 'suffix' in profile: +# suffix = profile['suffix'] +# else: +# suffix = '' +# +# if 'format' in profile: +# rdr_fmt = render_formats[profile['format']][0] +# ext = render_formats[profile['format']][1] +# else: +# rdr_fmt = 'PNG' +# ext = 'png' +# +# path = ink_paint.LunaticsShot(scene).render_path( +# suffix=suffix, framedigits=framedigits, ext=ext, rdr_fmt=rdr_fmt) +# +# scene.render.filepath = path \ No newline at end of file diff --git a/pkg/abx-0.2.6a.zip b/pkg/abx-0.2.6a.zip new file mode 100644 index 0000000..552e9c3 Binary files /dev/null and b/pkg/abx-0.2.6a.zip differ diff --git 
a/pkg/abx/__init__.py b/pkg/abx/__init__.py new file mode 100644 index 0000000..82990a7 --- /dev/null +++ b/pkg/abx/__init__.py @@ -0,0 +1,39 @@ + +bl_info = { + "name": "ABX", + "author": "Terry Hancock / Lunatics.TV Project / Anansi Spaceworks", + "version": (0, 2, 6), + "blender": (2, 79, 0), + "location": "SpaceBar Search -> ABX", + "description": "Anansi Studio Extensions for Blender", + "warning": "", + "wiki_url": "", + "tracker_url": "", + "category": "Object", + } + +blender_present = False +try: + # These are protected so we can read the add-on metadata from my + # management scripts, which run in the O/S standard Python 3 + import bpy, bpy.utils, bpy.types + blender_present = True + +except ImportError: + print("Blender Add-On 'ABX' requires the Blender Python environment to run.") + +if blender_present: + from . import abx_ui + + def register(): + abx_ui.register() + #bpy.utils.register_module(__name__) + + def unregister(): + abx_ui.unregister() + #bpy.utils.unregister_module(__name__) + + +if __name__ == "__main__": + register() + \ No newline at end of file diff --git a/pkg/abx/abx.yaml b/pkg/abx/abx.yaml new file mode 100644 index 0000000..364a862 --- /dev/null +++ b/pkg/abx/abx.yaml @@ -0,0 +1,113 @@ +# DEFAULT ABX SETTINGS +--- +abx_default: True + +project_unit: [] + +project_schema: [] + +definitions: + filetypes: + blend: "Blender File" + kdenlive: "Kdenlive Video Editor File" + mlt: "Kdenlive Video Mix Script" + svg: "Scalable Vector Graphics (Inkscape)" + kra: "Krita Graphic File" + xcf: "Gimp Graphic File" + png: "Portable Network Graphics (PNG) Image" + jpg: "Joint Photographic Experts Group (JPEG) Image" + aup: "Audacity Project" + ardour: "Ardour Project" + flac: "Free Lossless Audio Codec (FLAC)" + mp3: "MPEG Audio Layer III (MP3) Audio File" + ogg: "Ogg Vorbis Audio File" + avi: "Audio Video Interleave (AVI) Video Container" + mkv: "Matroska Video Container" + mp4: "Moving Picture Experts Group (MPEG) 4 Format" + txt: "Plain Text File" 
+ + roles: + extras: "Extras, crowds, auxillary animated movement" + mech: "Mechanical animation" + anim: "Character animation" + cam: "Camera direction" + vfx: "Visual special effects" + compos: "Compositing" + bkg: "Background 2D image" + bb: "Billboard 2D image" + tex: "Texture 2D image" + foley: "Foley sound" + voice: "Voice recording" + fx: "Sound effects" + music: "Music track" + cue: "Musical cue" + amb: "Ambient sound" + loop: "Ambient sound loop" + edit: "Video edit" + + roles_by_filetype: + kdenlive: edit + mlt: edit + + omit_ranks: # Controls how much we shorten names + edit: 0 + render: 0 + filename: 0 + scene: 0 + +abx: + render_profiles: + previz: + name: PreViz, + desc: 'GL/AVI Previz Render for Animatics' + engine: gl + version: any + fps: 30 + fps_div: 1000 + fps_skip: 1 + suffix: GL + format: AVI_JPEG + extension: avi + freestyle: False + + quick: + name: 30fps Paint + desc: '30fps Simplified Paint-Only Render' + engine: bi + fps: 30 + fps_skip: 3 + suffix: PT + format: AVI_JPEG + extension: avi + freestyle: False, + antialias: False, + motionblur: False + + check: + name: 1fps Check + desc: '1fps Full-Features Check Renders' + engine: bi + fps: 30 + fps_skip: 30 + suffix: CH + format: JPEG + extension: jpg + framedigits: 5 + freestyle: True + antialias: 8 + + full: + name: 30fps Full + desc: 'Full Render with all Features Turned On' + engine: bi + fps: 30 + fps_skip: 1 + suffix: '' + format: PNG + extension: png + framedigits: 5 + freestyle: True + antialias: 8 + motionblur: 2 + rendersize: 100 + compress: 50 diff --git a/pkg/abx/abx_ui.py b/pkg/abx/abx_ui.py new file mode 100644 index 0000000..5380e1d --- /dev/null +++ b/pkg/abx/abx_ui.py @@ -0,0 +1,560 @@ +# Anansi Studio Extensions for Blender 'ABX' +""" +Collection of Blender extension tools to make our jobs easier. +This is not really meant to be an integrated plugin, but rather +a collection of useful scripts we can run to solve problems we +run into. 
+""" +# +#Copyright (C) 2019 Terry Hancock +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# + +import os + +import bpy, bpy.utils, bpy.types, bpy.props + +from . import file_context + +# if bpy.data.filepath: +# BlendfileContext = file_context.FileContext(bpy.data.filepath) +# else: +# BlendfileContext = file_context.FileContext() +# +# abx_data = BlendfileContext.abx_data + +from . import copy_anim +from . import std_lunatics_ink +from . 
import render_profile + + +#configfile = os.path.join(os.path.dirname(__file__), 'config.yaml') + +#print("Configuration file path: ", os.path.abspath(configfile)) + +# Lunatics Scene Panel + +# Lunatics file/scene properties: + +# TODO: This hard-coded table is a temporary solution until I have figured +# out a good way to look these up from the project files (maybe YAML?): +seq_id_table = { + ('S1', 0): {'':'', 'mt':'Main Title'}, + ('S1', 1): {'':'', + 'TR':'Train', + 'SR':'Soyuz Rollout', + 'TB':'Touring Baikonur', + 'PC':'Press Conference', + 'SU':'Suiting Up', + 'LA':'Launch', + 'SF':'Soyuz Flight', + 'mt':'Main Title', + 'ad':'Ad Spot', + 'pv':'Preview', + 'et':'Episode Titles', + 'cr':'Credits' + }, + ('S1', 2): {'':'', + 'MM':'Media Montage', + 'mt':'Main Title', + 'et':'Episode Titles', + 'SS':'Space Station', + 'LC':'Loading Cargo', + 'TL':'Trans Lunar Injection', + 'BT':'Bed Time', + 'ad':'Ad Spot', + 'pv':'Preview', + 'cr':'Credits' + }, + ('S1', 3): {'':'', + 'mt':'Main Title', + 'et':'Episode Titles', + 'ZG':'Zero G', + 'LI':'Lunar Injection', + 'LO':'Lunar Orbit', + 'ML':'Moon Landing', + 'IR':'Iridium', + 'TC':'Touring Colony', + 'FD':'Family Dinner', + 'ad':'Ad Spot', + 'pv':'Preview', + 'cr':'Credits' + }, + ('S2', 0): {'':'', 'mt':'Main Title'}, + ('L', 0): {'':'', + 'demo':'Demonstration', + 'prop':'Property', + 'set': 'Set', + 'ext': 'Exterior Set', + 'int': 'Interior Set', + 'prac':'Practical', + 'char':'Character', + 'fx': 'Special Effect', + 'stock': 'Stock Animation' + }, + None: [''] + } + + +def get_seq_ids(self, context): + # + # Note: To avoid the reference bug mentioned in the Blender documentation, + # we only return values held in the global seq_id_table, which + # should remain defined and therefore hold a reference to the strings. 
+ # + if not context: + seq_ids = seq_id_table[None] + else: + scene = context.scene + series = scene.lunaprops.series_id + episode = scene.lunaprops.episode_id + if (series, episode) in seq_id_table: + seq_ids = seq_id_table[(series, episode)] + else: + seq_ids = seq_id_table[None] + seq_enum_items = [(s, s, seq_id_table[series,episode][s]) for s in seq_ids] + return seq_enum_items + +# Another hard-coded table -- for render profiles +render_profile_table = { + 'previz': { + 'name': 'PreViz', + 'desc': 'GL/AVI Previz Render for Animatics', + 'engine':'gl', + 'version':'any', + 'fps': 30, + 'fps_div': 1000, + 'fps_skip': 1, + 'suffix': 'GL', + 'format': 'AVI', + 'freestyle': False + }, + + 'paint6': { + 'name': '6fps Paint', + 'desc': '6fps Simplified Paint-Only Render', + 'engine':'bi', + 'fps': 30, + 'fps_skip': 5, + 'suffix': 'P6', + 'format': 'AVI', + 'freestyle': False, + 'antialias': False, + 'motionblur': False + }, + + 'paint3': { + 'name': '3fps Paint', + 'desc': '3fps Simplified Paint-Only Render', + 'engine': 'bi', + 'fps': 30, + 'fps_skip': 10, + 'suffix': 'P3', + 'format': 'AVI', + 'freestyle': False, + 'antialias': False, + 'motionblur': False, + }, + + 'paint': { + 'name': '30fps Paint', + 'desc': '30fps Simplified Paint-Only Render', + 'engine': 'bi', + 'fps': 30, + 'fps_skip': 1, + 'suffix': 'PT', + 'format': 'AVI', + 'freestyle': False, + 'antialias': False, + 'motionblur': False + }, + + 'check': { + 'name': '1fps Check', + 'desc': '1fps Full-Features Check Renders', + 'engine': 'bi', + 'fps': 30, + 'fps_skip': 30, + 'suffix': 'CH', + 'format': 'JPG', + 'framedigits': 5, + 'freestyle': True, + 'antialias': 8 + }, + + 'full': { + 'name': '30fps Full', + 'desc': 'Full Render with all Features Turned On', + 'engine': 'bi', + 'fps': 30, + 'fps_skip': 1, + 'suffix': '', + 'format': 'PNG', + 'framedigits': 5, + 'freestyle': True, + 'antialias': 8 + }, + } + + +class LunaticsSceneProperties(bpy.types.PropertyGroup): + """ + Properties of the current 
scene. + """ + series_id = bpy.props.EnumProperty( + items=[ + ('S1', 'S1', 'Series One'), + ('S2', 'S2', 'Series Two'), + ('S3', 'S3', 'Series Three'), + ('A1', 'Aud','Audiodrama'), + ('L', 'Lib','Library') + ], + name="Series", + default='S1', + description="Series/Season of Animated Series, Audiodrama, or Library" + ) + + episode_id = bpy.props.IntProperty( + name="Episode", + default=0, + description="Episode number (0 means multi-use), ignored for Library", + min=0, + max=1000, + soft_max=18 + ) + + seq_id = bpy.props.EnumProperty( + name='', + items=get_seq_ids, + description="Sequence ID" + ) + + block_id = bpy.props.IntProperty( + name='', + default=1, + min=0, + max=20, + soft_max=10, + description="Block number" + ) + + use_multicam = bpy.props.BoolProperty( + name="Multicam", + default=False, + description="Use multicam camera/shot numbering?" + ) + + cam_id = bpy.props.IntProperty( + name="Cam", + default=0, + min=0, + max=20, + soft_max=10, + description="Camera number" + ) + + shot_id = bpy.props.EnumProperty( + name='Shot', + #items=[('NONE', '', 'Single')]+[(c,c,'Shot '+c) for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'], + items=[(c,c,'Shot '+c) for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'], + default='A', + description="Shot ID, normally a single capital letter, can be empty, two letters for transitions" + ) + + shot_name = bpy.props.StringProperty( + name='Name', + description='Short descriptive codename', + maxlen=0 + ) + + + +class LunaticsScenePanel(bpy.types.Panel): + """ + Add a panel to the Properties-Scene screen + """ + bl_idname = 'SCENE_PT_lunatics' + bl_label = 'Lunatics Project' + bl_space_type = 'PROPERTIES' + bl_region_type = 'WINDOW' + bl_context = 'scene' + + def draw(self, context): + lunaprops = bpy.context.scene.lunaprops + self.layout.label(text='Lunatics! 
Project Properties') + row = self.layout.row() + row.prop(lunaprops, 'series_id') + row.prop(lunaprops, 'episode_id') + row = self.layout.row() + row.prop(lunaprops, 'use_multicam') + row = self.layout.row() + row.prop(lunaprops, 'seq_id') + row.prop(lunaprops, 'block_id') + if lunaprops.use_multicam: + row.prop(lunaprops, 'cam_id') + row.prop(lunaprops, 'shot_id') + row.prop(lunaprops, 'shot_name') + +# Buttons + +class RenderProfileSettings(bpy.types.PropertyGroup): + """ + Settings for Render Profiles control. + """ + render_profile = bpy.props.EnumProperty( + name='Profile', + items=[(k, v['name'], v['desc']) + for k,v in render_profile_table.items()], + description="Select from pre-defined profiles of render settings", + default='full') + + + +class RenderProfilesOperator(bpy.types.Operator): + """ + Operator invoked implicitly when render profile is changed. + """ + bl_idname = 'render.render_profiles' + bl_label = 'Apply Render Profile' + bl_options = {'UNDO'} + + def invoke(self, context, event): + scene = context.scene + profile = render_profile_table[scene.render_profile_settings.render_profile] + + render_profile.set_render_from_profile(scene, profile) + + return {'FINISHED'} + + +class RenderProfilesPanel(bpy.types.Panel): + """ + Add simple drop-down selector for generating common render settings with + destination set according to project defaults. + """ + bl_idname = 'SCENE_PT_render_profiles' + bl_label = 'Render Profiles' + bl_space_type = 'PROPERTIES' + bl_region_type = 'WINDOW' + bl_context = 'render' + + def draw(self, context): + rps = bpy.context.scene.render_profile_settings + row = self.layout.row() + row.prop(rps, 'render_profile') + row = self.layout.row() + row.operator('render.render_profiles') + + + +class copy_animation(bpy.types.Operator): + """ + Copy animation from active object to selected objects (select source last!). 
+ + Useful for fixing broken proxy rigs (create a new proxy, and used this tool + to copy all animation from the original -- avoids tedious/error-prone NLA work). + + Can also migrate to a re-scaled rig. + """ + bl_idname = 'object.copy_anim' + bl_label = 'Copy Animation' + bl_options = {'UNDO'} + + def invoke(self, context, event): + #print("Copy NLA from selected armature to active armatures.") + + src_ob = context.active_object + tgt_obs = [ob for ob in context.selected_objects if ob != context.active_object] + + # TODO + # Are these type checks necessary? + # Is there any reason to restrict this operator to armature objects? + # I think there isn't. + + if src_ob.type != 'ARMATURE': + self.report({'WARNING'}, 'Cannot copy NLA data from object that is not an ARMATURE.') + return {'CANCELLED'} + + tgt_arm_obs = [] + for ob in tgt_obs: + if ob.type == 'ARMATURE': + tgt_arm_obs.append(ob) + if not tgt_arm_obs: + self.report({'WARNING'}, 'No armature objects selected to copy animation data to.') + return {'CANCELLED'} + + copy_anim.copy_object_animation(src_ob, tgt_arm_obs, + dopesheet=context.scene.copy_anim_settings.dopesheet, + nla=context.scene.copy_anim_settings.nla, + rescale=context.scene.copy_anim_settings.rescale, + scale_factor=context.scene.copy_anim_settings.scale_factor, + report=self.report) + + return {'FINISHED'} + + + +class copy_animation_settings(bpy.types.PropertyGroup): + """ + Settings for the 'copy_animation' operator. 
+ """ + dopesheet = bpy.props.BoolProperty( + name = "Dope Sheet", + description = "Copy animation from Dope Sheet", + default=True) + + nla = bpy.props.BoolProperty( + name = "NLA Strips", + description = "Copy all strips from NLA Editor", + default=True) + + rescale = bpy.props.BoolProperty( + name = "Re-Scale/Copy", + description = "Make rescaled COPY of actions instead of LINK to original", + default = False) + + scale_factor = bpy.props.FloatProperty( + name = "Scale", + description = "Scale factor for scaling animation (Re-Scale w/ 1.0 copies actions)", + default = 1.0) + + + +class CharacterPanel(bpy.types.Panel): + bl_space_type = "VIEW_3D" # window type panel is displayed in + bl_context = "objectmode" + bl_region_type = "TOOLS" # region of window panel is displayed in + bl_label = "Character" + bl_category = "ABX" + + def draw(self, context): + settings = bpy.context.scene.copy_anim_settings + layout = self.layout.column(align = True) + layout.label("Animation Data") + layout.operator('object.copy_anim') + layout.prop(settings, 'dopesheet') + layout.prop(settings, 'nla') + layout.prop(settings, 'rescale') + layout.prop(settings, 'scale_factor') + + + + +class lunatics_compositing_settings(bpy.types.PropertyGroup): + """ + Settings for the LX compositor tool. + """ + inkthru = bpy.props.BoolProperty( + name = "Ink-Thru", + description = "Support transparent Freestyle ink effect", + default=True) + + billboards = bpy.props.BoolProperty( + name = "Billboards", + description = "Support material pass for correct billboard inking", + default = False) + + sepsky = bpy.props.BoolProperty( + name = "Separate Sky", + description = "Render sky separately with compositing support (better shadows)", + default = True) + + +class lunatics_compositing(bpy.types.Operator): + """ + Set up standard Lunatics scene compositing. 
+ """ + bl_idname = "scene.lunatics_compos" + bl_label = "Ink/Paint Config" + bl_options = {'UNDO'} + bl_description = "Set up standard Lunatics Ink/Paint compositing in scene" + + def invoke(self, context, event): + """ + Add standard 'Lunatics!' shot compositing to the currently-selected scene. + """ + scene = context.scene + + shot = std_lunatics_ink.LunaticsShot(scene, + inkthru=context.scene.lx_compos_settings.inkthru, + billboards=context.scene.lx_compos_settings.billboards, + sepsky=context.scene.lx_compos_settings.sepsky ) + + shot.cfg_scene() + + return {'FINISHED'} + +# def draw(self, context): +# settings = context.scene.lx_compos_settings +# self.col = self.layout.col() +# col.prop(settings, "inkthru", text="Ink Thru") +# col.prop(settings, "billboards", text="Ink Thru") + + + +class LunaticsPanel(bpy.types.Panel): + bl_space_type = "VIEW_3D" + bl_context = "objectmode" + bl_region_type = "TOOLS" + bl_label = "Lunatics" + bl_category = "ABX" + + def draw(self, context): + settings = bpy.context.scene.lx_compos_settings + layout = self.layout.column(align = True) + layout.label("Compositing") + layout.operator('scene.lunatics_compos') + layout.prop(settings, 'inkthru', text="Ink-Thru") + layout.prop(settings, 'billboards', text="Billboards") + layout.prop(settings, 'sepsky', text="Separate Sky") + + +def register(): + bpy.utils.register_class(LunaticsSceneProperties) + bpy.types.Scene.lunaprops = bpy.props.PointerProperty(type=LunaticsSceneProperties) + bpy.utils.register_class(LunaticsScenePanel) + + bpy.utils.register_class(RenderProfileSettings) + bpy.types.Scene.render_profile_settings = bpy.props.PointerProperty( + type=RenderProfileSettings) + bpy.utils.register_class(RenderProfilesOperator) + bpy.utils.register_class(RenderProfilesPanel) + + bpy.utils.register_class(copy_animation) + bpy.utils.register_class(copy_animation_settings) + bpy.types.Scene.copy_anim_settings = bpy.props.PointerProperty(type=copy_animation_settings) + 
bpy.utils.register_class(CharacterPanel) + + bpy.utils.register_class(lunatics_compositing_settings) + bpy.types.Scene.lx_compos_settings = bpy.props.PointerProperty(type=lunatics_compositing_settings) + bpy.utils.register_class(lunatics_compositing) + bpy.utils.register_class(LunaticsPanel) + +def unregister(): + bpy.utils.unregister_class(LunaticsSceneProperties) + bpy.utils.unregister_class(LunaticsScenePanel) + + bpy.utils.unregister_class(RenderProfileSettings) + bpy.utils.unregister_class(RenderProfilesOperator) + bpy.utils.unregister_class(RenderProfilesPanel) + + bpy.utils.unregister_class(copy_animation) + bpy.utils.unregister_class(copy_animation_settings) + bpy.utils.unregister_class(CharacterPanel) + + bpy.utils.unregister_class(lunatics_compositing_settings) + bpy.utils.unregister_class(lunatics_compositing) + bpy.utils.unregister_class(LunaticsPanel) diff --git a/pkg/abx/accumulate.py b/pkg/abx/accumulate.py new file mode 100644 index 0000000..b592be3 --- /dev/null +++ b/pkg/abx/accumulate.py @@ -0,0 +1,342 @@ +# accumulate.py +""" +Data structures for accumulating tree-structured data from multiple sources. + +Data is acquired from file and directory names and also from yaml files in the +tree. The yaml files are loaded in increasing priority from upper directories +to the local one, starting from the highest level file to contain a "project_root" +key. + +The files named for their parent directory are assumed to be KitCAT files (i.e. +"kitcat.yaml" and ".yaml" are treated the same way). Only files named +"abx.yaml" are assumed to be configuration files specific to ABX. + +We collect these by going up the file path, and then load them coming down. If +we find a "project_root" key, we ditch the previous data and start over. This way +any project files found above the project root will be ignored. 
+ +As a use case: if we were to store a new project inside of another project, the +new project's project_root would make it blind to the settings in the containing +project. Other directories in the parent project would still go to the parent +project's root. This avoids having the location the project is stored affect +the project data. + +The overall structure is a dictionary. When updating with new data, any element +that is itself a dictionary is treated recursively (that is, it is updated with +directory data when another dictionary is provided for the same key). If an +element is a list, then data from successively-higher directories extends the +list (see UnionList, below). If a scalar replaces a dictionary or list value in +a more specific entry, then it clobbers it and any updated information in it. + +@author: Terry Hancock + +@copyright: 2019 Anansi Spaceworks. + +@license: GNU General Public License, version 2.0 or later. (Python code) + +@contact: digitante@gmail.com + +Demo: + +>>> import accumulate +>>> T1 = accumulate.RecursiveDict(accumulate.TEST_DICT_1) +>>> T2 = accumulate.RecursiveDict(accumulate.TEST_DICT_2) +>>> import copy +>>> Ta = copy.deepcopy(T1) +>>> Tb = copy.deepcopy(T2) +>>> Ta +RecursiveDict({'A': 1, 'B': [1, 2, 3], 'C': {'a': 1, 'b': 2, 'c': 3}, 'D': {}, 'E': None, 'F': {'h': {'i': {'j': {'k': 'abcdefghijk'}}}}}) +>>> Tb +RecursiveDict({'C': {'d': 4, 'e': 5, 'f': 6}, 'D': (1, 2, 3), 'B': [4, 5, 6], 'E': 0}) +>>> Ta.update(T2) +>>> Ta +RecursiveDict({'A': 1, 'B': [4, 5, 6, 1, 2, 3], 'C': {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6}, 'D': (1, 2, 3), 'E': 0, 'F': {'h': {'i': {'j': {'k': 'abcdefghijk'}}}}}) +>>> Tb.update(T1) +>>> Tb +RecursiveDict({'C': {'d': 4, 'e': 5, 'f': 6, 'a': 1, 'b': 2, 'c': 3}, 'D': {}, 'B': [1, 2, 3, 4, 5, 6], 'E': None, 'A': 1, 'F': {'h': {'i': {'j': {'k': 'abcdefghijk'}}}}}) +>>> + +""" + +TEST_DICT_1 = { 'A':1, + 'B':[1,2,3], + 'C':{'a':1, 'b':2, 'c':3}, + 'D':{}, + 'E':None, + 
'F':{'h':{'i':{'j':{'k':'abcdefghijk'}}}}, + } + +TEST_DICT_2 = { 'C':{'d':4, 'e':5, 'f':6}, + 'D':(1,2,3), + 'B':[4,5,6], + 'E':0 + } + +YAML_TEST = """ +A: 1 +B: + - 4 + - 5 + - 6 + - 1 + - 2 + - 3 +C: + a: 1 + b: 2 + c: 3 + d: 4 + e: 5 + f: 6 +D: (1, 2, 3) +E: 0 +F: + h: + i: + j: + k: abcdefghijk +""" + +import os, collections.abc, re +import yaml + +wordre = re.compile(r'([A-Z]+[a-z]*|[a-z]+|[0-9]+)') + +class OrderedSet(collections.abc.Set): + """ + List-based set from Python documentation example. + """ + def __init__(self, iterable): + self.elements = lst = [] + for value in iterable: + if value not in lst: + lst.append(value) + + def __iter__(self): + return iter(self.elements) + + def __contains__(self, value): + return value in self.elements + + def __len__(self): + return len(self.elements) + + def __repr__(self): + return repr(list(self)) + + def union(self, other): + return self.__or__(other) + + def intersection(self, other): + return self.__and__(other) + +class UnionList(list): + """ + Special list-based collection, which implements a "union" operator similar + to the one defined for sets. It only adds options from the other list + which are not already in the current list. + + Note that it is intentionally asymmetric. The initial list may repeat values + and they will be kept, so it does not require the list to consist only of + unique entries (unlike Set collections). + + This allows us to use this type for loading list-oriented data from data + files, which may or may not contain repetitions for different uses, but + also makes accumulation idempotent (running the union twice will not + increase the size of the result, because no new values will be found). 
+ """ + def union(self, other): + combined = UnionList(self) + for element in other: + if element not in self: + combined.append(element) + return combined + +class RecursiveDict(collections.OrderedDict): + """ + A dictionary which updates recursively, updating any values which are + themselves dictionaries when the replacement value is a dictionary, rather + than replacing them, and treating any values which are themselves lists + as UnionLists and applying the union operation to combine them + (when the replacement value is also a list). + """ + def clear(self): + for key in self: + del self[key] + + def update(self, mapping): + for key in mapping: + if key in self: + if (isinstance(self[key], collections.abc.Mapping) and + isinstance(mapping[key], collections.abc.Mapping)): + # Subdictionary + newvalue = RecursiveDict(self[key]) + newvalue.update(RecursiveDict(mapping[key])) + self[key] = newvalue + + elif ((isinstance(self[key], collections.abc.MutableSequence) or + isinstance(self[key], collections.abc.Set)) and + (isinstance(mapping[key], collections.abc.MutableSequence) or + isinstance(mapping[key], collections.abc.Set))): + # Sublist + self[key] = UnionList(self[key]).union(UnionList(mapping[key])) + + else: # scalar + self[key] = mapping[key] + + else: # new key + self[key] = mapping[key] + + def get_data(self): + new = {} + for key in self: + if isinstance(self[key], RecursiveDict): + new[key]=dict(self[key].get_data()) + elif isinstance(self[key], UnionList): + new[key]=list(self[key]) + else: + new[key]=self[key] + return new + + def __setitem__(self, key, value): + if isinstance(value, collections.abc.Mapping): + super().__setitem__(key, RecursiveDict(value)) + + elif isinstance(value, collections.abc.MutableSequence): + super().__setitem__(key, UnionList(value)) + + else: + super().__setitem__(key,value) + + def __repr__(self, compact=False): + s = '' + if not compact: + s = s + '%s(' % self.__class__.__name__ + s = s + '{' + for key in self: + if 
isinstance(self[key], RecursiveDict): + s = s+"'%s'"%key + ': ' + "%s" % self[key].__repr__(compact=True) + ', ' + else: + s = s+ "'%s'"%key + ': ' + "%s" % repr(self[key]) + ', ' + if s.endswith(', '): s= s[:-2] + s = s + '}' + if not compact: + s = s + ')' + return s + + def from_yaml(self, yaml_string): + self.update(yaml.safe_load(yaml_string)) + return self + + def from_yaml_file(self, path): + with open(path, 'rt') as yamlfile: + self.update(yaml.safe_load(yamlfile)) + return self + + def to_yaml(self): + return yaml.dump(self.get_data()) + + def to_yaml_file(self, path): + with open(path, 'wt') as yamlfile: + yamlfile.write(yaml.dump(self.get_data())) + + +#-------- +# Code for collecting the YAML files we need + +ABX_YAML = os.path.join(os.path.dirname( + os.path.abspath(os.path.join(__file__))), + 'abx.yaml') + + +def collect_yaml_files(path, stems, dirmatch=False, sidecar=False, root='/'): + """ + Collect a list of file paths to YAML files. + + Does not attempt to read or interpret the files. + + @path: The starting point, typically the antecedent filename. + @stems: File stem (or sequence of stems) we recognize (in priority order). + @dirmatch: Also search for stems matching the containing directory name? + @sidecar: Also search for stems matching the antecent filename's stem? + @root: Top level directory to consider (do not search above this). + + "Stem" means the name with any extension after "." removed (typically, + the filetype). 
+ """ + yaml_paths = [] + if type(stems) is str: + stems = (stems,) + + path = os.path.abspath(path) + path, filename = os.path.split(path) + if sidecar: + filestem = os.path.splitext(filename)[0] + sidecar_path = os.path.join(path, filestem + '.yaml') + if os.path.isfile(sidecar_path): + yaml_paths.append(sidecar_path) + + while not os.path.abspath(path) == os.path.dirname(root): + path, base = os.path.split(path) + + if dirmatch: + yaml_path = os.path.join(path, base, base + '.yaml') + if os.path.isfile(yaml_path): + yaml_paths.append(yaml_path) + + for stem in stems: + yaml_path = os.path.join(path, base, stem + '.yaml') + if os.path.isfile(yaml_path): + yaml_paths.append(yaml_path) + + yaml_paths.reverse() + return yaml_paths + + +def has_project_root(yaml_path): + with open(yaml_path, 'rt') as yaml_file: + data = yaml.safe_load(yaml_file) + if 'project_root' in data: + return True + else: + return False + +def trim_to_project_root(yaml_paths): + for i in range(len(yaml_paths)-1,-1,-1): + if has_project_root(yaml_paths[i]): + return yaml_paths[i:] + return yaml_paths + +def get_project_root(yaml_paths): + trimmed = trim_to_project_root(yaml_paths) + if trimmed: + return os.path.dirname(trimmed[0]) + else: + # No project root was found! + return '/' + +def combine_yaml(yaml_paths): + data = RecursiveDict() + for path in yaml_paths: + with open(path, 'rt') as yaml_file: + data.update(yaml.safe_load(yaml_file)) + return data + +def get_project_data(filepath): + # First, get the KitCAT data. 
+ kitcat_paths = collect_yaml_files(filepath, + ('kitcat', 'project'), dirmatch=True, sidecar=True) + + kitcat_data = combine_yaml(trim_to_project_root(kitcat_paths)) + + kitcat_root = get_project_root(kitcat_paths) + + abx_data = combine_yaml([ABX_YAML])['abx'] + + abx_data.update(combine_yaml(collect_yaml_files(filepath, + 'abx', root=kitcat_root))) + + return kitcat_root, kitcat_data, abx_data + + + \ No newline at end of file diff --git a/pkg/abx/blender_context.py b/pkg/abx/blender_context.py new file mode 100644 index 0000000..1a7d217 --- /dev/null +++ b/pkg/abx/blender_context.py @@ -0,0 +1,169 @@ +# blender_context.py +""" +Contextual metadata acquired from internal values in a Blender file. + +This module must be invoked from within Blender to work, as it relies on the bpy Blender API +module and the currently-open Blender file's data graph in order to work. + +It collects data about scenes, objects, groups, and other datablocks in the Blender file, +as well as data encoded in text blocks in different formats. Overall file data is incorporated +into a PropertyGroup attached to the "WindowManager" object identified as 'WinMan' (normally, +it appears there is only ever one of these in a Blender file, but if there is more than one, this +is the one that will be used). 
+""" + +import io +import bpy, bpy.app, bpy.props, bpy.utils +from bpy.app.handlers import persistent +from accumulate import UnionList, RecursiveDict +import yaml + +def EnumFromList(schema, listname): + return [(e, e.capitalize(), e.capitalize()) for e in schema[listname]] + +prop_types = { + 'string':{ + 'property': bpy.props.StringProperty, + 'keywords': { 'name', 'description', 'default', 'maxlen', 'options', 'subtype'}, + 'translate': { + 'desc': ('description', None)}}, + 'enum': { + 'property': bpy.props.EnumProperty, + 'keywords': { 'items', 'name', 'description', 'default', 'options'}, + 'translate': { + 'desc': ('description', None), + 'items_from': ('items', EnumFromList)}}, + 'int': { + 'property': bpy.props.IntProperty, + 'keywords': { 'name', 'description', 'default', 'min', 'max', 'soft_min', 'soft_max', + 'step', 'options', 'subtype'}, + 'translate': { + 'desc': ('description', None)}}, + 'float': { + 'property': bpy.props.FloatProperty, + 'keywords': { 'name', 'description', 'default', 'min', 'max', 'soft_min', 'soft_max', + 'step', 'options', 'subtype', 'precision', 'unit'}, + 'translate': { + 'desc': ('description', None)}}, + 'bool': { + 'property': bpy.props.BoolProperty, + 'keywords': { 'name', 'description', 'default', 'options', 'subtype'}, + 'translate': { + 'desc': ('description', None)}} + } + +class AbxMeta(bpy.types.PropertyGroup): + """ + Metadata property group factory for attachment to Blender object types. + Definitions come from a YAML source (or default defined below). 
+ """ + default_schema = yaml.safe_load(io.StringIO("""\ +--- +blender: + - id: project + type: string + level: project + name: Project Name + desc: Name of the project + maxlen: 32 + + - id: project_title + type: string + level: project + name: Project Title + desc: Full title for the project + maxlen: 64 + + - id: project_description + type: string + level: project + name: Project Description + desc: Brief description of the project + maxlen: 128 + + - id: project_url + type: list string + level: project + name: Project URL + desc: URL for Project home page, or comma-separated list of Project URLs + + - id: level + type: enum + items_from: levels + name: Level + desc: Level of the file in the project hierarchy + +levels: + - project + - series + - episode + - seq + - subseq + - camera + - shot + - element + - frame + +hierarchies: + - library + - episodes + """)) + + def __new__(cls, schema=default_schema): + class CustomPropertyGroup(bpy.types.PropertyGroup): + pass + for definition in schema['blender']: + # Translate and filter parameters + try: + propmap = prop_types[definition['type']] + except KeyError: + # If no 'type' specified or 'type' not found, default to string: + propmap = prop_types['string'] + + filtered = {} + for param in definition: + if 'translate' in propmap and param in propmap['translate']: + filter = propmap['translate'][param][1] + if callable(filter): + # Filtered translation + filtered[propmap['translate'][param][0]] = filter(schema, definition[param]) + else: + # Simple translation + filtered[propmap['translate'][param][0]] = definition[param] + + # Create the Blender Property object + kwargs = dict((key,filtered[key]) for key in propmap['keywords'] if key in filtered) + setattr(CustomPropertyGroup, definition['id'], propmap['property'](**kwargs)) + + bpy.utils.register_class(CustomPropertyGroup) + return(CustomPropertyGroup) + + + +class BlenderContext(RecursiveDict): + """ + Dictionary accumulating data from sources within the 
currently-open Blender file. + """ + filepath = '' + defaults = {} + + def __init__(self): + self.clear() + + @classmethod + def update(cls): + try: + cls.file_metadata = bpy.data.window_managers['WinMan'].metadata + except AttributeError: + bpy.data.window_managers['WinMan'].new(FileMeta()) + + + def clear(self): + for key in self: + del self[key] + self.update(self.defaults) + + + + + \ No newline at end of file diff --git a/pkg/abx/context.py b/pkg/abx/context.py new file mode 100644 index 0000000..0ea3c65 --- /dev/null +++ b/pkg/abx/context.py @@ -0,0 +1,26 @@ +# context.py +""" +Combines context sources to create AbxContext object (dictionary tree). +""" + +import bpy, bpy.app, bpy.data, bpy.ops + +from bpy.app.handlers import persistent +#from accumulate import UnionList, RecursiveDict + +from . import file_context + +if os.path.exists(bpy.data.filepath): + BlendfileContext = file_context.FileContext(bpy.data.filepath) +else: + BlendfileContext = file_context.FileContext() + +# Attach a handler to keep our filepath context up to date with Blender +@persistent +def update_handler(ctxt): + BlendfileContext.update(bpy.data.filepath) + +bpy.app.handlers.save_post.append(update_handler) +bpy.app.handlers.load_post.append(update_handler) +bpy.app.handlers.scene_update_post.append(update_handler) + diff --git a/pkg/abx/copy_anim.py b/pkg/abx/copy_anim.py new file mode 100644 index 0000000..b3a3c59 --- /dev/null +++ b/pkg/abx/copy_anim.py @@ -0,0 +1,126 @@ +# copy_anim.py +""" +Blender Python code to copy animation between armatures or proxy armatures. +""" + +import bpy, bpy.types, bpy.utils, bpy.props + +#---------------------------------------- +## TOOLS +# This might be moved into another module later + +def copy_object_animation(sourceObj, targetObjs, + dopesheet=False, nla=False, rescale=False, scale_factor=1.0, + report=print): + """ + Copy Dope Sheet & NLA editor animation from active object to selected objects. + Most useful with armatures. 
Assumes bones match. Can be rescaled in the process. + + From StackExchange post: + https://blender.stackexchange.com/questions/74183/how-can-i-copy-nla-tracks-from-one-armature-to-another + """ + for targetObj in targetObjs: + if targetObj.animation_data is not None: + targetObj.animation_data_clear() + + targetObj.animation_data_create() + + source_animation_data = sourceObj.animation_data + target_animation_data = targetObj.animation_data + + # copy the dopesheet animation (active animation) + if dopesheet: + report({'INFO'}, 'Copying Dopesheet animation') + if source_animation_data.action is None: + report({'WARNING'}, + "CLEARING target dope sheet - old animation saved with 'fake user'") + if target_animation_data.action is not None: + target_animation_data.action.use_fake_user = True + target_animation_data.action = None + else: + if rescale: + target_animation_data.action = copy_animation_action_with_rescale( + source_animation_data.action, scale_factor) + else: + target_animation_data.action = copy_animation_action_with_rescale( + source_animation_data.action, scale_factor) + + target_animation_data.action.name = targetObj.name + 'Action' + + if nla: + report({'INFO'}, 'Copying NLA strips') + if source_animation_data: + # Create new NLA tracks based on the source + for source_nla_track in source_animation_data.nla_tracks: + target_nla_track = target_animation_data.nla_tracks.new() + target_nla_track.name = source_nla_track.name + # In each track, create action strips base on the source + for source_action_strip in source_nla_track.strips: + + if rescale: + new_action = copy_animation_action_with_rescale( + source_action_strip.action, scale_factor) + else: + new_action = source_action_strip.action + + target_action_strip = target_nla_track.strips.new( + new_action.name, + source_action_strip.frame_start, + new_action) + + # For each strip, copy the properties -- EXCEPT the ones we + # need to protect or can't copy + # introspect property names (is there a 
better way to do this?) + props = [p for p in dir(source_action_strip) if + not p in ('action',) + and not p.startswith('__') and not p.startswith('bl_') + and source_action_strip.is_property_set(p) + and not source_action_strip.is_property_readonly(p) + and not source_action_strip.is_property_hidden(p)] + for prop in props: + setattr(target_action_strip, prop, getattr(source_action_strip, prop)) + + +# Adapted from reference: +# https://www.reddit.com/r/blender/comments/eu3w6m/guide_how_to_scale_a_rigify_rig/ +# + +def reset_armature_stretch_constraints(rig_object): + """ + Reset stretch-to constraints on an armature object - necessary after rescaling. + """ + bone_count = 0 + for bone in rig_object.pose.bones: + for constraint in bone.constraints: + if constraint.type == "STRETCH_TO": + constraint.rest_length = 0 + bone_count += 1 + return bone_count + + +def rescale_animation_action_in_place(action, scale_factor): + """ + Rescale a list of animation actions by a scale factor (in-place). + """ + #for fcurve in bpy.data.actions[action].fcurves: + for fcurve in action.fcurves: + data_path = fcurve.data_path + if data_path.startswith('pose.bones[') and data_path.endswith('].location'): + for p in fcurve.keyframe_points: + p.co[1] *= scale_factor + p.handle_left[1] *= scale_factor + p.handle_right[1] *= scale_factor + return action + +def copy_animation_action_with_rescale(action, scale_factor): + """ + Copy an animation action, rescaled. + """ + new_action = action.copy() + new_action.name = new_action.name[:-4]+'.rescale' + return rescale_animation_action_in_place(new_action, scale_factor) + + + + +#---------------------------------------- diff --git a/pkg/abx/file_context.py b/pkg/abx/file_context.py new file mode 100644 index 0000000..4dec9b9 --- /dev/null +++ b/pkg/abx/file_context.py @@ -0,0 +1,1266 @@ +# file_context.py +""" +Contextual metadata acquired from the file system, file name, directory structure, and +sidecar data files. 
+ +Data is acquired from file and directory names and also from yaml files in the tree. +The yaml files are loaded in increasing priority from metadata.yaml, abx.yaml, .yaml. +They are also loaded from the top of the tree to the bottom, with the most local Values +overriding the top-level ones. + +@author: Terry Hancock + +@copyright: 2019 Anansi Spaceworks. + +@license: GNU General Public License, version 2.0 or later. (Python code) + Creative Commons Attribution-ShareAlike, version 3.0 or later. (Website Templates). + +@contact: digitante@gmail.com + +Demo: +>>> +>>> fc = FileContext(TESTPATH) + +>>> fc.notes +['Data from implicit + explicit sources'] + +>>> fc['project']['name'] +'My Project' + +>>> fc['episode']['code'] +1 + +>>> fc['rank'] +'block' + +>>> fc['block']['title'] +'Beginning Of End' + +>>> fc['seq']['title'] +'LastPoint' + +>>> fc['episode']['title'] +'Pilot' +>>> fc['hierarchy'] +'episode' + +>>> fc['filename'] +'A.001-LP-1-BeginningOfEnd-anim.txt' + +>>> fc['path'] +'/project/terry/Dev/eclipse-workspace/ABX/testdata/myproject/Episodes/A.001-Pilot/Seq/LP-LastPoint/A.001-LP-1-BeginningOfEnd-anim.txt' + +>>> fc.root +'/project/terry/Dev/eclipse-workspace/ABX/testdata/myproject' + +""" + +import os, re, copy, string, collections +import yaml + +DEFAULT_YAML = {} +with open(os.path.join(os.path.dirname(__file__), 'abx.yaml')) as def_yaml_file: + DEFAULT_YAML.update(yaml.safe_load(def_yaml_file)) + + +TESTPATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'testdata', 'myproject', 'Episodes', 'A.001-Pilot', 'Seq', 'LP-LastPoint', 'A.001-LP-1-BeginningOfEnd-anim.txt')) + +from . 
import accumulate + +from .accumulate import RecursiveDict + +wordre = re.compile(r'([A-Z][a-z]+|[a-z]+|[0-9]+|[A-Z][A-Z]+)') + +class Enum(dict): + def __init__(self, *options): + for i, option in enumerate(options): + if isinstance(option, list) or isinstance(option, tuple): + name = option[0] + self[i] = tuple(option) + else: + name = str(option) + self[i] = (option, option, option) + self[name] = i + if name not in ('name', 'number', 'options'): + setattr(self, name, i) + + @property + def options(self): + """ + This gives the options in a Blender-friendly format, with + tuples of three strings for initializing bpy.props.Enum(). + + If the Enum was initialized with strings, the options will + contain the same string three times. If initialized with + tuples of strings, they will be used unaltered. + """ + options = [] + number_keys = sorted([k for k in self.keys() if type(k) is int]) + return [self[i] for i in number_keys] + + def name(self, n): + if type(n) is int: + return self[n][0] + elif type(n) is str: + return n + else: + return None + + def number(self, n): + if type(n) is str: + return self[n] + elif type(n) is int: + return n + else: + return None + + +log_level = Enum('DEBUG', 'INFO', 'WARNING', 'ERROR') + + +NameParsers = {} # Parser registry + +def registered_parser(parser): + """ + Decorator function to register a parser class. + """ + NameParsers[parser.name] = parser + return parser + +@registered_parser +class Parser_ABX_Episode: + """ + Default filename parsing algorithm. + + Assumes field-based filenames of the form: + + E[-[-[-Cam][-]]][-]-<role>.<filetype> + + Where the <field> indicates fields with fieldnames, and there are three expected separators: + + - is the 'field_separator' + E is the 'episode_separator' + . is the 'filetype_separator' + + (These can be overridden in the initialization). + The class is callable, taking a string as input and returning a dictionary of fields. 
+ """ + name = 'abx_episode' + + max_score = 10 # Maximum number of fields parsed + + # supported values for filetype + filetypes = { + 'blend': "Blender File", + 'kdenlive': "Kdenlive Video Editor File", + 'mlt': "Kdenlive Video Mix Script", + 'svg': "Scalable Vector Graphics (Inkscape)", + 'kra': "Krita Graphic File", + 'xcf': "Gimp Graphic File", + 'png': "Portable Network Graphics (PNG) Image", + 'jpg': "Joint Photographic Experts Group (JPEG) Image", + 'aup': "Audacity Project", + 'ardour': "Ardour Project", + 'flac': "Free Lossless Audio Codec (FLAC)", + 'mp3': "MPEG Audio Layer III (MP3) Audio File", + 'ogg': "Ogg Vorbis Audio File", + 'avi': "Audio Video Interleave (AVI) Video Container", + 'mkv': "Matroska Video Container", + 'mp4': "Moving Picture Experts Group (MPEG) 4 Format}", + 'txt': "Plain Text File" + } + + # Roles that make sense in an episode context + roles = { + 'extras': "Extras, crowds, auxillary animated movement", + 'mech': "Mechanical animation", + 'anim': "Character animation", + 'cam': "Camera direction", + 'vfx': "Visual special effects", + 'compos': "Compositing", + 'bkg': "Background 2D image", + 'bb': "Billboard 2D image", + 'tex': "Texture 2D image", + 'foley': "Foley sound", + 'voice': "Voice recording", + 'fx': "Sound effects", + 'music': "Music track", + 'cue': "Musical cue", + 'amb': "Ambient sound", + 'loop': "Ambient sound loop", + 'edit': "Video edit" + } + + # A few filetypes imply their roles: + roles_by_filetype = { + 'kdenlive': 'edit', + 'mlt': 'edit' + } + + + def __init__(self, field_separator='-', episode_separator='E', filetype_separator='.', + fields=None, filetypes=None, roles=None, **kwargs): + if not fields: + fields = {} + if filetypes: + self.filetypes = copy.deepcopy(self.filetypes) # Copy class attribute to instance + self.filetypes.update(filetypes) # Update with new values + if roles: + self.roles = copy.deepcopy(self.roles) # Copy class attribute to instance + self.roles.update(roles) # Update with new 
values + self.field_separator = field_separator + self.episode_separator = episode_separator + self.filetype_separator = filetype_separator + + def __call__(self, filename, namepath): + score = 0.0 + fielddata = {} + + # Check for filetype ending + i_filetype = filename.rfind(self.filetype_separator) + if i_filetype < 0: + fielddata['filetype'] = None + else: + fielddata['filetype'] = filename[i_filetype+1:] + filename = filename[:i_filetype] + score = score + 1.0 + + components = filename.split(self.field_separator) + + # Check for role marker in last component + if components[-1] in self.roles: + fielddata['role'] = components[-1] + del components[-1] + fielddata['hierarchy'] = 'episode' + score = score + 2.0 + elif fielddata['filetype'] in self.roles_by_filetype: + fielddata['role'] = self.roles_by_filetype[fielddata['filetype']] + fielddata['hierarchy'] = 'episode' + else: + fielddata['role'] = None + fielddata['hierarchy'] = None + + # Check for a descriptive title (must be 3+ characters in length) + if components and len(components[-1])>2: + # Normalize the title as words with spaces + title = ' '.join(w for w in wordre.split(components[-1]) if wordre.fullmatch(w)) + del components[-1] + score = score + 1.0 + else: + title = None + + # Check if first field contains series/episode number + if components: + prefix = components[0] + try: + fielddata['series'] = {} + fielddata['episode'] = {} + fielddata['series']['code'], episode_id = prefix.split(self.episode_separator) + fielddata['episode']['code'] = int(episode_id) + fielddata['rank'] = 'episode' + del components[0] + score = score + 2.0 + except: + pass + + # Check for sequence/block/shot/camera designations + if components: + fielddata['seq'] = {} + fielddata['seq']['code'] = components[0] + fielddata['rank'] = 'seq' + del components[0] + score = score + 1.0 + + if components: + try: + fielddata['block'] = {} + fielddata['block']['code'] = int(components[0]) + del components[0] + fielddata['rank'] = 
'block' + score = score + 1.0 + except: + pass + + if components and components[0].startswith('Cam'): + fielddata['camera'] = {} + fielddata['camera']['code'] = components[0][len('Cam'):] + fielddata['rank'] = 'camera' + del components[0] + score = score + 1.0 + + if components: + # Any remaining structure is joined back to make the shot ID + fielddata['shot'] = {} + fielddata['shot']['code'] = ''.join(components) + fielddata['rank'] = 'shot' + components = None + score = score + 1.0 + + if title and fielddata['rank'] in fielddata: + fielddata[fielddata['rank']]['title'] = title + + return score/self.max_score, fielddata + +@registered_parser +class Parser_ABX_Schema(object): + """ + Parser based on using the project_schema defined in the project root directory YAML. + """ + name = 'abx_schema' + + def __init__(self, schemas=None, definitions=None, + filetype_separator = '.', + comment_separator = '--', + role_separator = '-', + title_separator = '-', + **kwargs): + + self.filetype_separator = filetype_separator + self.comment_separator = comment_separator + self.role_separator = role_separator + self.title_separator = title_separator + + self.schemas = schemas + + if 'roles' in definitions: + self.roles = definitions['roles'] + else: + self.roles = [] + + if 'filetypes' in definitions: + self.filetypes = definitions['filetypes'] + else: + self.filetypes = [] + + if 'roles_by_filetype' in definitions: + self.roles_by_filetype = definitions['roles_by_filetype'] + else: + self.roles_by_filetype = [] + + def _parse_ending(self, filename, separator): + try: + remainder, suffix = filename.rsplit(separator, 1) + score = 1.0 + except ValueError: + remainder = filename + suffix = None + score = 0.0 + return (suffix, remainder, score) + + def _parse_beginning(self, filename, separator): + try: + prefix, remainder = filename.split(separator, 1) + score = 1.0 + except ValueError: + prefix = filename + remainder = '' + score = 0.0 + return (prefix, remainder, score) + + def 
__call__ (self, filename, namepath, debug=False): + fields = {} + score = 0.0 + possible = 0.0 + + # First get specially-handled extensions + remainder = filename + field, newremainder, s = self._parse_ending(remainder, self.filetype_separator) + if field and field in self.filetypes: + remainder = newremainder + fields['filetype'] = field + score += s*1.0 + else: + fields['filetype'] = None + + field, remainder, s = self._parse_ending(remainder, self.comment_separator) + fields['comment'] = field + score += s*0.5 + + field, newremainder, s = self._parse_ending(remainder, self.role_separator) + if field and field in self.roles: + remainder = newremainder + fields['role'] = field + score += s*0.5 + else: + fields['role'] = None + + field, remainder, s = self._parse_ending(remainder, self.title_separator) + fields['title'] = field + score += s*0.5 + + possible += 3.0 + + # Implicit roles + if ( not fields['role'] and + fields['filetype'] and + fields['role'] in self.roles_by_filetype): + self.role = self.roles_by_filetype[fields['filetype']] + score += 0.2 + + #possible += 0.2 + + # Figure the rest out from the schema + # Find the matching rank start position for the filename + start = 0 + for start, (schema, name) in enumerate(zip(self.schemas, namepath)): + field, r, s = self._parse_beginning(remainder, schema.delimiter) + try: + if field.lower() == schema.format.format(name).lower(): + score += 1.0 + break + except ValueError: + print(' (365) field, format', field, schema.format) + + possible += 1.0 + + # Starting from that position, try to match fields + # up to the end of the namepath (checking against it) + irank = 0 + for irank, (schema, name) in enumerate( + zip(self.schemas[start:], namepath[start:])): + if not remainder: break + field, remainder, s = self._parse_beginning(remainder, schema.delimiter) + score += s + try: + if ( type(field) == str and + field.lower() == schema.format.format(name).lower()): + fields[schema.rank]={'code':field} + fields['rank'] 
= schema.rank + score += 1.0 + except ValueError: + print(' (384) field, format', field, schema.format) + possible += 2.0 + + # Remaining fields are authoritative (doesn't affect score) + for schema in self.schemas[irank:]: + if not remainder: break + field, remainder, s = self._parse_beginning(remainder, schema.delimiter) + fields[schema.rank]={'code':field} + fields['rank'] = schema.rank + + if 'rank' in fields: + fields[fields['rank']]['title'] = fields['title'] + + if not fields['role'] and fields['filetype'] in self.roles_by_filetype: + fields['role'] = self.roles_by_filetype[fields['filetype']] + + return score/possible, fields + +@registered_parser +class Parser_ABX_Fallback(object): + """ + Highly-tolerant parser to fall back to if the others fail + or can't be used. + """ + name = 'abx_fallback' + + filetypes = DEFAULT_YAML['definitions']['filetypes'] + roles = DEFAULT_YAML['definitions']['roles'] + roles_by_filetype = ( + DEFAULT_YAML['definitions']['roles_by_filetype']) + + main_sep_re = re.compile(r'\W+') # Any single non-word char + comment_sep_re = re.compile(r'[\W_][\W_]+|[~#$!=+&]+') + + + def __init__(self, **kwargs): + pass + + def _parse_ending(self, filename, separator): + try: + remainder, suffix = filename.rsplit(separator, 1) + score = 1.0 + except ValueError: + remainder = filename + suffix = None + score = 0.0 + return (suffix, remainder, score) + + def __call__(self, filename, namepath): + fields = {} + score = 1.0 + possible = 4.5 + + split = filename.rsplit('.', 1) + if len(split)<2 or split[1] not in self.filetypes: + fields['filetype'] = None + remainder = filename + score += 1.0 + else: + fields['filetype'] = split[1] + remainder = split[0] + + comment_match = self.comment_sep_re.search(remainder) + if comment_match: + fields['comment'] = remainder[comment_match.end():] + remainder = remainder[:comment_match.start()] + else: + fields['comment'] = None + + role = self.main_sep_re.split(remainder)[-1] + if role in self.roles: + 
fields['role'] = role + remainder = remainder[:-1-len(role)] + score += 1.0 + else: + fields['role'] = None + + # Implied role + if fields['filetype'] in self.roles_by_filetype: + fields['role'] = self.roles_by_filetype[fields['filetype']] + score += 1.0 + + words = self.main_sep_re.split(remainder) + fields['code'] = ''.join([w.capitalize() for w in words]) + fields['title'] = remainder + + return score/possible, fields + + + +class RankNotFound(LookupError): + pass + +class NameSchema(object): + """ + Represents a schema used for parsing and constructing designations, names, etc. + """ + # Defaults + _default_schema = { + 'delimiter':'-', + + 'type': 'string', + 'format':'{:s}', + 'minlength':1, # Must be at least one character + 'maxlength':0, # 0 means unlimited + 'words': False, # If true, treat value as words and spaces + 'pad': '0', # Left-padding character for fixed length + 'default': None, + + 'rank': 'project', + 'irank': 0, + 'ranks': ('series', 'episode', 'sequence', + 'block', 'camera', 'shot', 'element') + } + + _codetypes = { + 'number':{}, + 'string':{}, + 'letter':{}, + 'lowercase':{}, + } + + _letter = tuple((A,A,A) for A in string.ascii_uppercase) + _lowercase = tuple((a,a,a) for a in string.ascii_lowercase) + + rank = 'project' + irank = 0 + default = None + + ranks = ('project',) + + def __init__(self, parent=None, rank=None, schema=None, debug=False): + # Three types of schema data: + + # Make sure schema is a copy -- no side effects! 
+ if not schema: + schema = {} + else: + s = {} + s.update(schema) + schema = s + + if not rank and 'rank' in schema: + rank = schema['rank'] + + # Stepped down in rank from parent: + self.parent = parent + + if parent and rank: + # Check rank is defined in parent ranks and use that + # We can skip optional ranks + if rank in parent.ranks: + j = parent.ranks.index(rank) + self.ranks = parent.ranks[j+1:] + self.rank = rank + else: + # It's an error to ask for a rank that isn't defined + raise RankNotFound( + '"%s" not in defined ranks for "%s"' % (rank, parent)) + + elif parent and not rank: + # By default, get the first rank below parent + self.rank = parent.ranks[0] + self.ranks = parent.ranks[1:] + + elif rank and not parent: + # With no parent, we're starting a new tree and renaming the root + self.rank = rank + self.ranks = self._default_schema['ranks'] + + else: # not rank and not parent: + # New tree with default rank + self.rank = self._default_schema['rank'] + self.ranks = self._default_schema['ranks'] + + # Directly inherited/acquired from parent + # So far, only need a delimiter specified, but might be other stuff + self.delimiter = self._default_schema['delimiter'] + if parent and parent.delimiter: self.delimiter = parent.delimiter + + # Explicit override by the new schema: + if 'ranks' in schema: self.ranks = schema['ranks'] + if 'delimiter' in schema: self.delimiter = schema['delimiter'] + if 'default' in schema: + if schema['default'] == 'None': + self.default = None + else: + self.default = schema['default'] + + # Default unless specified (i.e. 
not inherited from parent) + newschema = {} + newschema.update(self._default_schema) + newschema.update(schema) + + self.format = str(newschema['format']) + + self.minlength = int(newschema['minlength']) + self.maxlength = int(newschema['maxlength']) + self.pad = str(newschema['pad']) + self.words = bool(newschema['words']) + + if newschema['type'] == 'letter': + self.codetype = self._letter + + elif newschema['type'] == 'lowercase': + self.codetype = self._lowercase + + elif newschema['type'] == 'number': + # Recognized Python types + self.codetype = int + if 'minlength' or 'maxlength' in schema: + self.format = '{:0>%dd}' % self.minlength + + elif newschema['type'] == 'string': + self.codetype = str + + if ('minlength' in schema) or ('maxlength' in schema): + if self.maxlength == 0: + # Special case for unlimited length + self.format = '{:%1.1s>%ds}' % (self.pad, self.minlength) + self.format = '{:%1.1s>%d.%ds}' % ( + self. pad, self.minlength, self.maxlength) + + elif newschema['type'] == 'bool': + self.codetype = bool + + elif isinstance(newschema['type'], collections.Sequence): + # Enumerated types + # This is somewhat specific to Blender -- setting the + # enumeration values requires a sequence in a particular format + self.codetype = [] + for option in newschema['type']: + if type(option) is not str and isinstance(option, collections.Sequence): + option = tuple([str(e) for e in option][:3]) + else: + option = (str(option), str(option), str(option)) + self.codetype.append(option) + + elif isinstance(newschema['type'], collections.Mapping): + self.codetype = [] + for key, val in newschema['type'].items(): + if type(val) is not str and isinstance(val, collections.Sequence): + if len(val) == 0: + option = (str(key), str(key), str(key)) + elif len(val) == 1: + option = (str(key), str(val[0]), str(val[0])) + else: + option = (str(key), str(val[0]), str(val[1])) + else: + option = (str(key), str(val), str(val)) + self.codetype.append(option) + else: + # If all else 
fails, just list the string + self.codetype = None + + + + def __repr__(self): + return('<(%s).NameSchema: %s (%s, %s, %s, (%s))>' % ( + repr(self.parent), + #self.irank, + self.rank, + self.delimiter, + self.default, + self.format, + self.codetype + )) + + +class NameContext(object): + """ + Single naming context within the file (e.g. a Blender scene). + """ + + def __init__(self, container, fields=None, namepath_segment=(), ): + self.clear() + if container or fields or namepath_segment: + self.update(container, fields, namepath_segment) + + def clear(self): + self.fields = {} + self.schemas = ['project'] + self.rank = 0 + self.code = 'untitled' + self.container = None + self.namepath_segment = [] + + def update(self, container=None, fields=None, namepath_segment=()): + self.container = container + + if namepath_segment: + self.namepath_segment = namepath_segment + else: + self.namepath_segment = [] + + try: + self.schemas = self.container.schemas + except AttributeError: + self.schemas = [] + + try: + self.omit_ranks = self.container.omit_ranks + except AttributeError: + self.omit_ranks = {} + self.omit_ranks.update({ + 'edit': 0, + 'render': 1, + 'filename': 1, + 'scene': 3}) + + if fields: + if isinstance(fields, dict): + self.fields.update(fields) + elif isinstance(fields, str): + self.fields.update(yaml.safe_load(fields)) + + def update_fields(self, data): + self.fields.update(data) + + def _load_schemas(self, schemas, start=0): + """ + Load schemas from a list of schema dictionaries. + + @schemas: list of dictionaries containing schema field data (see NameSchema). + The data will typically be extracted from YAML, and is + expected to be a list of dictionaries, each of which defines + fields understood by the NameSchema class, to instantiate + NameSchema objects. The result is a linked chain of schemas from + the top of the project tree down. 
+ + @start: if a start value is given, the top of the existing schema + chain is kept, and the provided schemas starts under the rank of + the start level in the existing schema. This is what happens when + the schema is locally overridden at some point in the hierarchy. + """ + self.schemas = self.schemas[:start] + if self.schemas: + last = self.schemas[-1] + else: + last = None + for schema in schemas: + self.schemas.append(NameSchema(last, schema['rank'], schema=schema)) + #last = self.schemas[-1] + + def _parse_words(self, wordtext): + words = [] + groups = re.split(r'[\W_]', wordtext) + for group in groups: + if len(group)>1: + group = group[0].upper() + group[1:] + words.extend(re.findall(r'[A-Z][a-z]*', group)) + elif len(group)==1: + words.append(group[0].upper()) + else: + continue + return words + + def _cap_words(self, words): + return ''.join(w.capitalize() for w in words) + + def _underlower_words(self, words): + return '_'.join(w.lower() for w in words) + + def _undercap_words(self, words): + return '_'.join(w.capitalize() for w in words) + + def _spacecap_words(self, words): + return ' '.join(w.capitalize() for w in words) + + def _compress_name(self, name): + return self._cap_words(self._parse_words(name)) + + @property + def namepath(self): + if self.container: + return self.container.namepath + self.namepath_segment + else: + return self.namepath_segment + + @property + def rank(self): + if 'rank' in self.fields: + return self.fields['rank'] + else: + return None + + @rank.setter + def rank(self, rank): + self.fields['rank'] = rank + + @property + def name(self): + if 'name' in self.fields: + return self.fields['name'] + elif 'title' in self.fields: + return self._compress_name(self.fields['title']) +# elif 'code' in self.fields: +# return self.fields['code'] + else: + return '' + + @name.setter + def name(self, name): + self.fields['name'] = name + + @property + def code(self): + if self.rank: + return self.fields[self.rank]['code'] + else: + 
return self.fields['code'] + + @code.setter + def code(self, code): + if self.rank: + self.fields[self.rank] = {'code': code} + else: + self.fields['code'] = code + + @property + def description(self): + if 'description' in self.fields: + return self.fields['description'] + else: + return '' + + @description.setter + def description(self, description): + self.fields['description'] = str(description) + + def _get_name_components(self): + components = [] + for code, schema in zip(self.namepath, self.schemas): + if code is None: continue + components.append(schema.format.format(code)) + components.append(schema.delimiter) + return components[:-1] + + @property + def fullname(self): + if self.name: + return (self.designation + + self.schemas[-1].delimiter + + self._compress_name(self.name) ) + else: + return self.designation + + @property + def designation(self): + return ''.join(self._get_name_components()) + + @property + def shortname(self): + namebase = self.omit_ranks['filename']*2 + return (''.join(self._get_name_components()[namebase:]) + + self.schemas[-1].delimiter + + self._compress_name(self.name)) + + def get_scene_name(self, suffix=''): + namebase = self.omit_ranks['scene']*2 + desig = ''.join(self._get_name_components()[namebase:]) + + if suffix: + return desig + ' ' + suffix + else: + return desig + + def get_render_path(self, suffix='', framedigits=5, ext='png'): + + desig = ''.join(self._get_name_components()[self.omit_ranks['render']+1:]) + + if ext in ('avi', 'mov', 'mp4', 'mkv'): + if suffix: + path = os.path.join(self.render_root, suffix, + desig + '-' + suffix + '.' + ext) + else: + path = os.path.join(self.render_root, ext.upper(), + desig + '.' + ext) + else: + if suffix: + path = os.path.join(self.render_root, + suffix, desig, + desig + '-' + suffix + '-f' + '#'*framedigits + '.' + ext) + else: + path = os.path.join(self.render_root, + ext.upper(), desig, + desig + '-f' + '#'*framedigits + '.' 
+ ext) + return path + + + +class FileContext(NameContext): + """ + Collected information about an object's location on disk: metadata + about filename, directory names, and project, based on expected keywords. + """ +# hierarchies = () +# hierarchy = None + #schema = None + + # IMMUTABLE DEFAULTS: + filepath = None + root = None + folders = () + #ranks = () + project_units = () + + filename = None + + fields = None + #subunits = () + + code = '_' + +# defaults = { +# 'filetype': None, 'role': None, 'hierarchy': None, 'project': None, +# 'series': None, 'episode': None, 'seq': None, 'block': None, +# 'camera': None, 'shot': None, 'title': None } + + def __init__(self, path=None): + """ + Collect path context information from a given filepath. + (Searches the filesystem for context information). + """ + NameContext.__init__(self, None, {}) + self.clear() + self.clear_notes() + if path: + self.update(path) + + def clear(self): + NameContext.clear(self) + + # Identity + self.root = os.path.abspath(os.environ['HOME']) + self.render_root = os.path.join(self.root, 'Renders') + self.filetype = '' + self.role = '' + self.title = '' + self.comment = '' + + # Containers + #self.notes = [] + self.name_contexts = [] + + # Status / Settings + self.filepath = None + self.filename = None + self.file_exists = False + self.folder_exists = False + self.omit_ranks = { + 'edit': 0, + 'render': 0, + 'filename': 0, + 'scene': 0} + + # Defaults + self.provided_data = RecursiveDict(DEFAULT_YAML) + self.abx_fields = DEFAULT_YAML['abx'] + + def clear_notes(self): + # We use this for logging, so it doesn't get cleared by the + # normal clear process. + self.notes = [] + + def update(self, path): + # Basic File Path Info + self.filepath = os.path.abspath(path) + self.filename = os.path.basename(path) + + # Does the file path exist? 
+ if os.path.exists(path): + self.file_exists = True + self.folder_exists = True + else: + self.file_exists = False + if os.path.exists(os.path.dirname(path)): + self.folder_exists = True + else: + self.folder_exists = False + + # - Should we create it? / Are we creating it? + + # We should add a default YAML file in the ABX software to guarantee + # necessary fields are in place, and to document the configuration for + # project developers. + + # Data from YAML Files + #self._collect_yaml_data() + self.provided_data = RecursiveDict(DEFAULT_YAML) + + kitcat_root, kitcat_data, abx_data = accumulate.get_project_data(self.filepath) + self.root = kitcat_root + self.provided_data.update(kitcat_data) + path = os.path.abspath(os.path.normpath(self.filepath)) + root = os.path.abspath(self.root) + self.folders = [os.path.basename(self.root)] + self.folders.extend(os.path.normpath(os.path.relpath(path, root)).split(os.sep)[:-1]) + + self.abx_fields = abx_data + # Did we find the YAML data for the project? + # Did we find the project root? + + # TODO: Bug? + # Note that 'project_schema' might not be correct if overrides are given. + # As things are, I think it will simply append the overrides, and this + # may lead to odd results. We'd need to actively compress the list by + # overwriting according to rank + # + try: + self._load_schemas(self.provided_data['project_schema']) + self.namepath_segment = [d['code'] for d in self.provided_data['project_unit']] + self.code = self.namepath[-1] + except: + print("Errors finding Name Path (is there a 'project_schema' or 'project_unit' defined?") + pass + # print("\n(899) filename = ", self.filename) + # if 'project_schema' in self.provided_data: + # print("(899) project_schema: ", self.provided_data['project_schema']) + # else: + # print("(899) project schema NOT DEFINED") + # + # print("(904) self.namepath_segment = ", self.namepath_segment) + + + # Was there a "project_schema" section? + # - if not, do we fall back to a default? 
+ + # Was there a "project_unit" section? + # - if not, can we construct what we need from project_root & folders? + + # Is there a definitions section? + # Do we provide defaults? + + try: + self.render_root = os.path.join(self.root, + self.provided_data['definitions']['render_root']) + except KeyError: + self.render_root = os.path.join(self.root, 'Renders') + + self.omit_ranks = {} + try: + for key, val in self.provided_data['definitions']['omit_ranks'].items(): + self.omit_ranks[key] = int(val) + except KeyError: + self.omit_ranks.update({ + 'edit': 0, + 'render': 1, + 'filename': 1, + 'scene': 3}) + + # Data from Parsing the File Name + try: + self.parsers = [NameParsers[self.provided_data['definitions']['parser']](**self.schema['filenames'])] + except (TypeError, KeyError, IndexError): + self.parsers = [ + #Parser_ABX_Episode(), + Parser_ABX_Schema(self.schemas, self.provided_data['definitions'])] + + parser_chosen, parser_score = self._parse_filename() + self.log(log_level.INFO, "Parsed with %s, score: %d" % + (parser_chosen, parser_score)) + + + + # TODO: + # We don't currently consider the information from the folder names, + # though we could get some additional information this way + + + + def __repr__(self): + s = '{0}(data='.format(self.__class__.__name__) + #s = s + super().__repr__() + s = s + str(self.code) + '(' + str(self.rank) + ')' + s = s + ')' + return s + + def log(self, level, msg): + if type(level) is str: + level = log_level.index(level) + self.notes.append((level, msg)) + + def get_log_text(self, level=log_level.INFO): + level = log_level.number(level) + return '\n'.join([ + ': '.join((log_level.name(note[0]), note[1])) + for note in self.notes + if log_level.number(note[0]) >= level]) + + def _parse_filename(self): + """ + Try available fuzzy data parsers on the filename, and pick the one + that returns the best score. 
+ """ + fields = {} + best_score = 0.0 + best_parser_name = None + for parser in self.parsers: + score, fielddata = parser(self.filename, self.namepath) + if score > best_score: + fields = fielddata + best_parser_name = parser.name + best_score = score + self.fields.update(fields) + self._pull_up_last_rank_fields() + return best_parser_name, best_score + + def _pull_up_last_rank_fields(self): + if ( 'rank' in self.fields and + self.fields['rank'] in self.fields and + isinstance(self.fields[self.fields['rank']], collections.Mapping) ): + for key, val in self.fields[self.fields['rank']].items(): + self.fields[key] = val + + +# def _collect_yaml_data(self): + + @property + def filetype(self): + if 'filetype' in self.fields: + return self.fields['filetype'] + else: + return '' + + @filetype.setter + def filetype(self, filetype): + self.fields['filetype'] = filetype + + @property + def role(self): + if 'role' in self.fields: + return self.fields['role'] + else: + return '' + + @role.setter + def role(self, role): + self.fields['role'] = role + + @property + def title(self): + if 'title' in self.fields: + return self.fields['title'] + else: + return '' + + @title.setter + def title(self, title): + self.fields['title'] = title + + @property + def comment(self): + if 'comment' in self.fields: + return self.fields['comment'] + else: + return '' + + @comment.setter + def comment(self, comment): + self.fields['comment'] = comment + + @classmethod + def deref_implications(cls, values, matchfields): + subvalues = {} + for key in values: + # TODO: is it safe to use type tests here instead of duck tests? 
+ if type(values[key])==int and values[key] < len(matchfields): + subvalues[key]=matchfields[values[key]] + elif type(values[key]==dict): + subvalues[key]=cls.deref_implications(values[key], matchfields) + elif type(values[key]==list): + vals = [] + for val in values[key]: + vals.append(cls.deref_implications(val, matchfields)) + return subvalues + + def get_path_implications(self, path): + data = {} + prefix = r'(?:.*/)?' + suffix = r'(?:/.*)?' + for implication in self.schema['path_implications']: + matched = re.compile(prefix + implication['match'] + suffix).match(path) + if matched and matched.groups: + data.update(self.deref_implications(implication['values'], matched.groups())) + return data + + def new_name_context(self, rank=None, **kwargs): + """ + Get a subunit from the current file. + Any rank in the hierarchy may be specified, though element, shot, + camera, and block are most likely. + """ + fields = {} + fields.update(self.fields) + + namepath_segment = [] + ranks = [s.rank for s in self.schemas] + i_rank = len(self.namepath) + old_rank = ranks[i_rank -1] + + # The new rank will be the highest rank mentioned, or the + # explicitly requested rank or + # one rank past the namepath + # + new_rank = self.schemas[i_rank].rank + + for schema in self.schemas[i_rank:]: + if schema.rank in kwargs: + fields[schema.rank] = {'code':kwargs[schema.rank]} + new_rank = schema.rank + namepath_segment.append(kwargs[schema.rank]) + elif rank is not None: + namepath_segment.append(schema.default) + if ranks.index(schema.rank) <= ranks.index(rank): + new_rank = schema.rank + + delta_rank = ranks.index(new_rank) - ranks.index(old_rank) + + # Truncate to the new rank: + namepath_segment = namepath_segment[:delta_rank] + + fields['rank'] = new_rank + fields['code'] = namepath_segment[-1] + + self.name_contexts.append(NameContext(self, fields, + namepath_segment=namepath_segment)) + return self.name_contexts[-1] + + + + + + + + \ No newline at end of file diff --git 
# render_profile.py
"""
Blender Python code to set parameters based on render profiles.
"""

# FIX: collapsed a duplicate bare 'import bpy' into the single line below.
import bpy, bpy.types, bpy.utils, bpy.props

from . import std_lunatics_ink

from . import file_context


class RenderProfile(object):
    """
    A named bundle of render settings (engine, fps, resolution, output
    format, etc.) built from a 'fields' dictionary -- typically one entry
    of the 'render_profiles' block in the project YAML -- which can be
    written onto a Blender scene with apply().

    Settings whose attribute value is None are left unaltered by apply();
    any setting absent from 'fields' becomes None.
    """

    render_formats = {
        # VERY simplified and limited list of formats from Blender that we need:
        # <API 'format'>: (<bpy file format>, <filename extension>),
        'PNG': ('PNG', 'png'),
        'JPG': ('JPEG', 'jpg'),
        'EXR': ('OPEN_EXR_MULTILAYER', 'exr'),
        'AVI': ('AVI_JPEG', 'avi'),
        'MKV': ('FFMPEG', 'mkv')
    }

    # Accepted spellings mapped to Blender engine identifiers.
    # 'gl' means "viewport/OpenGL render", which is not a real engine (None).
    engines = {
        'bi': 'BLENDER_RENDER',
        'BLENDER_RENDER': 'BLENDER_RENDER',
        'BI': 'BLENDER_RENDER',

        'cycles': 'CYCLES',
        'CYCLES': 'CYCLES',

        'bge': 'BLENDER_GAME',
        'BLENDER_GAME': 'BLENDER_GAME',
        'BGE': 'BLENDER_GAME',

        'gl': None,
        'GL': None
    }

    def __init__(self, fields):
        """
        @fields: dict of profile settings (see class docstring). The dict
            is not modified.
        """
        # Note: Settings w/ value *None* are left unaltered by apply().
        # That is, they remain whatever they were before.
        # If a setting isn't included in the fields, then
        # the attribute will be *None*.

        # FIX: work on a copy so the caller's dict is not mutated by the
        # 'engine' default below.
        fields = dict(fields)
        if 'engine' not in fields:
            fields['engine'] = None

        if fields['engine'] == 'gl':
            self.viewport_render = True
            self.engine = None
        else:
            self.viewport_render = False

        if fields['engine'] in self.engines:
            self.engine = self.engines[fields['engine']]
        else:
            self.engine = None

        def _opt(key, cast):
            # Return cast(fields[key]) if the key is present, else None.
            # FIX: the previous "'k' in fields and cast(...) or None" idiom
            # collapsed legitimate falsy values (0, False, '') to None.
            return cast(fields[key]) if key in fields else None

        # Parameters which are stored as-is, without modification:
        self.fps = _opt('fps', int)
        self.fps_skip = _opt('fps_skip', int)
        self.fps_divisor = _opt('fps_divisor', float)
        self.rendersize = _opt('rendersize', int)
        self.compress = _opt('compress', int)

        self.format = _opt('format', str)

        self.freestyle = _opt('freestyle', bool)

        # Antialiasing: tri-state toggle, with the optional sample count
        # restricted to the values Blender accepts (5, 8, 11, 16).
        self.antialiasing_samples = None
        self.use_antialiasing = None
        if 'antialias' in fields:
            if fields['antialias']:
                self.use_antialiasing = True
                if fields['antialias'] in (5, 8, 11, 16):
                    self.antialiasing_samples = str(fields['antialias'])
            else:
                self.use_antialiasing = False

        # Motion blur: tri-state toggle, with an optional integer sample count.
        self.use_motion_blur = None
        self.motion_blur_samples = None
        if 'motionblur' in fields:
            if fields['motionblur']:
                self.use_motion_blur = True
                if type(fields['motionblur']) == int:
                    self.motion_blur_samples = int(fields['motionblur'])
            else:
                self.use_motion_blur = False

        # Filename decoration for this profile's output.
        self.framedigits = fields.get('framedigits', 5)
        self.suffix = fields.get('suffix', '')

    def apply(self, scene):
        """
        Apply the profile settings to the given scene.

        Only truthy settings are written, so a profile never zeroes a scene
        setting it doesn't mention.
        """
        if self.engine: scene.render.engine = self.engine
        if self.fps: scene.render.fps = self.fps
        if self.fps_skip: scene.frame_step = self.fps_skip
        if self.fps_divisor: scene.render.fps_base = self.fps_divisor
        if self.rendersize: scene.render.resolution_percentage = self.rendersize
        if self.compress: scene.render.image_settings.compression = self.compress

        if self.format:
            scene.render.image_settings.file_format = self.render_formats[self.format][0]

        if self.freestyle: scene.render.use_freestyle = self.freestyle
        if self.use_antialiasing:
            scene.render.use_antialiasing = self.use_antialiasing

        if self.antialiasing_samples:
            scene.render.antialiasing_samples = self.antialiasing_samples
        if self.use_motion_blur:
            scene.render.use_motion_blur = self.use_motion_blur

        if self.motion_blur_samples:
            scene.render.motion_blur_samples = self.motion_blur_samples

        if self.format:
            # prefix = scene.name_context.render_path
            # prefix = BlendfileContext.name_contexts[scene.name_context].render_path
            prefix = 'path_to_render'  # TODO: we actually need to get this from NameContext
            if self.suffix:
                scene.render.filepath = (prefix + '-' + self.suffix + '-' +
                    'f' + ('#'*self.framedigits) + '.' +
                    self.render_formats[self.format][1])
b/pkg/abx/std_lunatics_ink.py @@ -0,0 +1,678 @@ +# std_lunatics_ink.py +""" +Functions to set up the standard ink and paint compositing arrangement +for "Lunatics" +""" + +import os + +import bpy, bpy.props, bpy.utils + +# Hard-coded default parameters: +INK_THICKNESS = 3 +INK_COLOR = (0,0,0) +THRU_INK_THICKNESS = 2 +THRU_INK_COLOR = (20,100,50) + + + +# TODO: probably should have a dialog somewhere that can change these through the UI? + +class LunaticsShot(object): + """ + General class for Lunatics Blender Scene data. + """ + colorcode = { + 'paint': (1.00, 1.00, 1.00), + 'ink': (0.75, 0.50, 0.35), + 'thru': (0.35, 0.50, 0.75), + 'bb': (0.35, 0.75, 0.50), + 'bbthru': (0.35, 0.75, 0.75), + 'sky': (0.50, 0.25, 0.75), + 'compos': (0.75, 0.75, 0.75), + 'output': (0.35, 0.35, 0.35) + } + + def __init__(self, scene, inkthru=False, billboards=False, sepsky=False): + self.scene = scene + self.inkthru = bool(inkthru) + self.billboards = bool(billboards) + self.sepsky = bool(sepsky) + + self.series_id = scene.lunaprops.series_id + self.episode_id = scene.lunaprops.episode_id + self.seq_id = scene.lunaprops.seq_id + self.block_id = scene.lunaprops.block_id + self.shot_id = scene.lunaprops.shot_id + self.cam_id = scene.lunaprops.cam_id + self.shot_name = scene.lunaprops.shot_name + + self.render_root = '//../../Renders/' + + @property + def fullname(self): + return self.designation + '-' + self.name + + @property + def designation(self): + episode_code = "%2.2sE%2.2d" % (self.series_id, self.episode_id) + return episode_code + '-' + self.shortname + + @property + def shortname(self): + desig = str(self.seq_id) + '-' + str(self.block_id) + if self.cam_id: + desig = desig + '-Cam' + str(self.cam_id) + if self.shot_id: + desig = desig + '-' + str(self.shot_id) + return desig + + @property + def scene_name(self): + if self.shot_name: + return self.shortname + ' ' + self.shot_name + else: + return self.shortname + + def render_path(self, suffix='', framedigits=5, ext='png', 
rdr_fmt='PNG'): + if suffix: + suffix = '-' + suffix + if rdr_fmt in ('AVI', 'MKV'): + path = os.path.join(self.render_root, suffix, + self.designation + suffix + '.' + ext) + else: + path = os.path.join(self.render_root, suffix, self.designation, + self.designation + suffix + '-f' + '#'*framedigits + '.' + ext) + return path + + def cfg_scene(self, scene=None, thru=True, exr=True, multicam=False, role='shot'): + if not scene: + scene = self.scene + + scene.name = self.scene_name + scene.render.filepath = self.render_path() + #os.path.join(self.render_root, 'PNG', self.designation, self.designation + '-f#####.png') + scene.render.image_settings.file_format='PNG' + scene.render.image_settings.compression = 50 + scene.render.image_settings.color_mode = 'RGB' + scene.render.use_freestyle = True + + # Create Paint & Ink Render Layers + for rlayer in scene.render.layers: + rlayer.name = '~' + rlayer.name + rlayer.use = False + # Rename & turn off existing layers (but don't delete, in case they were wanted) + + scene.render.layers.new('Paint') + self.cfg_paint(scene.render.layers['Paint']) + + scene.render.layers.new('Ink') + self.cfg_ink(scene.render.layers['Ink'], + thickness=INK_THICKNESS, color=INK_COLOR) + + if self.inkthru: + scene.render.layers.new('Ink-Thru') + self.cfg_ink(scene.render.layers['Ink-Thru'], + thickness=THRU_INK_THICKNESS, color=THRU_INK_COLOR) + + if self.billboards: + scene.render.layers.new('BB-Alpha') + self.cfg_bbalpha(scene.render.layers['BB-Alpha']) + + scene.render.layers.new('BB-Mat') + self.cfg_bbmat(scene.render.layers['BB-Mat'], thru=False) + + if self.billboards and self.inkthru: + scene.render.layers.new('BB-Mat-Thru') + self.cfg_bbmat(scene.render.layers['BB-Mat-Thru'], thru=True) + + if self.sepsky: + scene.render.layers.new('Sky') + self.cfg_sky(scene.render.layers['Sky']) + + self.cfg_nodes(scene) + + def _new_rlayer_in(self, name, scene, rlayer, location, color): + tree = scene.node_tree + rlayer_in = 
tree.nodes.new('CompositorNodeRLayers') + rlayer_in.name = '_'.join([n.lower() for n in name.split('-')])+'_in' + rlayer_in.label = name+'-In' + rlayer_in.scene = scene + rlayer_in.layer = rlayer + rlayer_in.color = color + rlayer_in.use_custom_color = True + rlayer_in.location = location + return rlayer_in + + def cfg_nodes(self, scene): + # Create Compositing Node Tree + scene.use_nodes = True + tree = scene.node_tree + # clear default nodes + for node in tree.nodes: + tree.nodes.remove(node) + + # Paint RenderLayer Nodes + paint_in = self._new_rlayer_in('Paint', scene, 'Paint', + (0,1720), self.colorcode['paint']) + + if self.sepsky: + sky_in = self._new_rlayer_in('Sky', scene, 'Sky', + (0, 1200), self.colorcode['sky']) + + # Configure EXR format + exr_paint = tree.nodes.new('CompositorNodeOutputFile') + exr_paint.name = 'exr_paint' + exr_paint.label = 'Paint EXR' + exr_paint.location = (300,1215) + exr_paint.color = self.colorcode['paint'] + exr_paint.use_custom_color = True + exr_paint.format.file_format = 'OPEN_EXR_MULTILAYER' + exr_paint.format.color_mode = 'RGBA' + exr_paint.format.color_depth = '16' + exr_paint.format.exr_codec = 'ZIP' + exr_paint.base_path = os.path.join(self.render_root, 'EXR', + self.designation, self.designation + '-Paint-f#####' + '.exr') + if 'Image' in exr_paint.layer_slots: + exr_paint.layer_slots.remove(exr_paint.inputs['Image']) + + # Create EXR layers and connect to render passes + rpasses = ['Image', 'Depth', 'Normal', 'Vector', + 'Spec', 'Shadow','Reflect','Emit'] + for rpass in rpasses: + exr_paint.layer_slots.new(rpass) + tree.links.new(paint_in.outputs[rpass], exr_paint.inputs[rpass]) + + if self.sepsky: + exr_paint.layer_slots.new('Sky') + tree.links.new(sky_in.outputs['Image'], exr_paint.inputs['Sky']) + + # Ink RenderLayer Nodes + ink_in = self._new_rlayer_in('Ink', scene, 'Ink', + (590, 1275), self.colorcode['ink']) + + if self.inkthru: + thru_in = self._new_rlayer_in('Thru', scene, 'Ink-Thru', + (590, 990), 
self.colorcode['thru']) + + if self.billboards: + bb_in = self._new_rlayer_in('BB', scene, 'BB-Alpha', + (0, 870), self.colorcode['bb']) + + bb_mat = self._new_rlayer_in('BB-Mat', scene, 'BB-Mat', + (0, 590), self.colorcode['bb']) + + if self.inkthru and self.billboards: + bb_mat_thru = self._new_rlayer_in('BB-Mat-Thru', scene, 'BB-Mat-Thru', + (0, 280), self.colorcode['bbthru']) + + # Ink EXR + exr_ink = tree.nodes.new('CompositorNodeOutputFile') + exr_ink.name = 'exr_ink' + exr_ink.label = 'Ink EXR' + exr_ink.location = (1150,700) + exr_ink.color = self.colorcode['ink'] + exr_ink.use_custom_color = True + exr_ink.format.file_format = 'OPEN_EXR_MULTILAYER' + exr_ink.format.color_mode = 'RGBA' + exr_ink.format.color_depth = '16' + exr_ink.format.exr_codec = 'ZIP' + exr_ink.base_path = os.path.join(self.render_root, 'EXR', + self.designation, self.designation + '-Ink-f#####' + '.exr') + + # Create EXR Ink layers and connect + if 'Image' in exr_ink.layer_slots: + exr_ink.layer_slots.remove(exr_ink.inputs['Image']) + exr_ink.layer_slots.new('Ink') + tree.links.new(ink_in.outputs['Image'], exr_ink.inputs['Ink']) + + if self.inkthru: + exr_ink.layer_slots.new('Ink-Thru') + tree.links.new(thru_in.outputs['Image'], exr_ink.inputs['Ink-Thru']) + + if self.billboards: + exr_ink.layer_slots.new('BB-Alpha') + tree.links.new(bb_in.outputs['Alpha'], exr_ink.inputs['BB-Alpha']) + + exr_ink.layer_slots.new('BB-Mat') + tree.links.new(bb_mat.outputs['IndexMA'], exr_ink.inputs['BB-Mat']) + + if self.inkthru and self.billboards: + exr_ink.layer_slots.new('BB-Mat-Thru') + tree.links.new(bb_mat_thru.outputs['IndexMA'], exr_ink.inputs['BB-Mat-Thru']) + + + # Preview Compositing + mix_shadow = tree.nodes.new('CompositorNodeMixRGB') + mix_shadow.name = 'mix_shadow' + mix_shadow.label = 'Mix-Shadow' + mix_shadow.location = (510,1820) + mix_shadow.color = self.colorcode['compos'] + mix_shadow.use_custom_color = True + mix_shadow.blend_type = 'MULTIPLY' + 
mix_shadow.inputs['Fac'].default_value = 0.6 + mix_shadow.use_clamp = True + tree.links.new(paint_in.outputs['Image'], mix_shadow.inputs[1]) + tree.links.new(paint_in.outputs['Shadow'], mix_shadow.inputs[2]) + + mix_reflect = tree.nodes.new('CompositorNodeMixRGB') + mix_reflect.name = 'mix_reflect' + mix_reflect.label = 'Mix-Reflect' + mix_reflect.location = (910, 1620) + mix_reflect.color = self.colorcode['compos'] + mix_reflect.use_custom_color = True + mix_reflect.blend_type = 'ADD' + mix_reflect.inputs['Fac'].default_value = 1.1 + mix_reflect.use_clamp = True + tree.links.new(paint_in.outputs['Reflect'], mix_reflect.inputs[2]) + + mix_emit = tree.nodes.new('CompositorNodeMixRGB') + mix_emit.name = 'mix_emit' + mix_emit.label = 'Mix-Emit' + mix_emit.location = (1110, 1520) + mix_emit.blend_type = 'ADD' + mix_emit.inputs['Fac'].default_value = 1.1 + mix_emit.use_clamp = True + tree.links.new(mix_reflect.outputs['Image'], mix_emit.inputs[1]) + tree.links.new(paint_in.outputs['Emit'], mix_emit.inputs[2]) + + if self.sepsky: + sky_mix = tree.nodes.new('CompositorNodeMixRGB') + sky_mix.name = 'sky_mix' + sky_mix.label = 'Sky Mix' + sky_mix.location = (710,1720) + sky_mix.color = self.colorcode['sky'] + sky_mix.use_custom_color = True + sky_mix.blend_type = 'MIX' + sky_mix.use_clamp = True + tree.links.new(sky_in.outputs['Image'], sky_mix.inputs[1]) + tree.links.new(paint_in.outputs['Alpha'], sky_mix.inputs['Fac']) + tree.links.new(mix_shadow.outputs['Image'], sky_mix.inputs[2]) + tree.links.new(sky_mix.outputs['Image'], mix_reflect.inputs[1]) + else: + tree.links.new(mix_shadow.outputs['Image'], mix_reflect.inputs[1]) + + if self.billboards: + mat_idx = tree.nodes.new('CompositorNodeIDMask') + mat_idx.name = "mat_idx" + mat_idx.label = "BB-ID" + mat_idx.location = (260, 670) + mat_idx.index = 1 + mat_idx.use_antialiasing = True + mat_idx.color = self.colorcode['bb'] + mat_idx.use_custom_color = True + tree.links.new(bb_mat.outputs['IndexMA'], mat_idx.inputs['ID 
value']) + + combine_bb_ma = tree.nodes.new('CompositorNodeMath') + combine_bb_ma.name = 'combine_bb_ma' + combine_bb_ma.label = 'Material x BB' + combine_bb_ma.location = (440,670) + combine_bb_ma.color = self.colorcode['bb'] + combine_bb_ma.use_custom_color = True + combine_bb_ma.operation = 'MULTIPLY' + combine_bb_ma.use_clamp = True + tree.links.new(mat_idx.outputs['Alpha'], combine_bb_ma.inputs[0]) + tree.links.new(bb_in.outputs['Alpha'], combine_bb_ma.inputs[1]) + + invert_bb_mask = tree.nodes.new('CompositorNodeInvert') + invert_bb_mask.name = 'invert_bb_mask' + invert_bb_mask.label = 'Invert Mask' + invert_bb_mask.location = (650,670) + invert_bb_mask.color = self.colorcode['bb'] + invert_bb_mask.use_custom_color = True + invert_bb_mask.invert_rgb = True + tree.links.new(combine_bb_ma.outputs['Value'], invert_bb_mask.inputs['Color']) + + bb_ink_mask = tree.nodes.new('CompositorNodeMath') + bb_ink_mask.name = 'bb_ink_mask' + bb_ink_mask.label = 'BB Ink Mask' + bb_ink_mask.location = (1150,1315) + bb_ink_mask.color = self.colorcode['bb'] + bb_ink_mask.use_custom_color = True + bb_ink_mask.operation = 'MULTIPLY' + bb_ink_mask.use_clamp = True + tree.links.new(invert_bb_mask.outputs['Color'], bb_ink_mask.inputs[0]) + + blur_ink = tree.nodes.new('CompositorNodeBlur') + blur_ink.name = 'blur_ink' + blur_ink.label = 'Blur-Ink' + blur_ink.location = (1620, 1110) + blur_ink.color = self.colorcode['ink'] + blur_ink.use_custom_color = True + blur_ink.filter_type = 'FAST_GAUSS' + blur_ink.size_x = 1.0 + blur_ink.size_y = 1.0 + blur_ink.use_extended_bounds = False + blur_ink.inputs['Size'].default_value = 1.0 + + if self.inkthru: + merge_ink_ao = tree.nodes.new('CompositorNodeAlphaOver') + merge_ink_ao.name = 'merge_ink' + merge_ink_ao.label = 'Merge-Ink' + merge_ink_ao.location = (1150,910) + merge_ink_ao.color = self.colorcode['thru'] + merge_ink_ao.use_custom_color = True + merge_ink_ao.use_premultiply = False + merge_ink_ao.premul = 0.0 + 
merge_ink_ao.inputs['Fac'].default_value = 1.0 + tree.links.new(ink_in.outputs['Image'], merge_ink_ao.inputs[1]) + tree.links.new(thru_in.outputs['Image'], merge_ink_ao.inputs[2]) + tree.links.new(merge_ink_ao.outputs['Image'], blur_ink.inputs['Image']) + else: + tree.links.new(ink_in.outputs['Image'], blur_ink.inputs['Image']) + + overlay_ink = tree.nodes.new('CompositorNodeAlphaOver') + overlay_ink.name = 'Overlay Ink' + overlay_ink.label = 'Overlay Ink' + overlay_ink.location = (1820,1315) + overlay_ink.color = self.colorcode['compos'] + overlay_ink.use_custom_color = True + overlay_ink.use_premultiply = False + overlay_ink.premul = 0.0 + overlay_ink.inputs['Fac'].default_value = 1.0 + tree.links.new(mix_emit.outputs['Image'], overlay_ink.inputs[1]) + tree.links.new(blur_ink.outputs['Image'], overlay_ink.inputs[2]) + + if self.billboards: + tree.links.new(ink_in.outputs['Alpha'], bb_ink_mask.inputs[1]) + tree.links.new(bb_ink_mask.outputs['Value'], overlay_ink.inputs['Fac']) + + if self.inkthru and self.billboards: + mat_idx_thru = tree.nodes.new('CompositorNodeIDMask') + mat_idx_thru.name = "mat_idx_thru" + mat_idx_thru.label = "BB-ID-Thru" + mat_idx_thru.location = (260, 425) + mat_idx_thru.index = 1 + mat_idx_thru.use_antialiasing = True + mat_idx_thru.color = self.colorcode['bbthru'] + mat_idx_thru.use_custom_color = True + tree.links.new(bb_mat_thru.outputs['IndexMA'], mat_idx_thru.inputs['ID value']) + + combine_bbthru_ma = tree.nodes.new('CompositorNodeMath') + combine_bbthru_ma.name = 'combine_bbthru_ma' + combine_bbthru_ma.label = 'Material x BB-Thru' + combine_bbthru_ma.location = (440,425) + combine_bbthru_ma.color = self.colorcode['bbthru'] + combine_bbthru_ma.use_custom_color = True + combine_bbthru_ma.operation = 'MULTIPLY' + combine_bbthru_ma.use_clamp = True + tree.links.new(mat_idx_thru.outputs['Alpha'], combine_bbthru_ma.inputs[0]) + tree.links.new(bb_in.outputs['Alpha'], combine_bbthru_ma.inputs[1]) + + invert_bbthru_mask = 
tree.nodes.new('CompositorNodeInvert') + invert_bbthru_mask.name = 'invert_bbthru_mask' + invert_bbthru_mask.label = 'Invert Mask' + invert_bbthru_mask.location = (650,425) + invert_bbthru_mask.color = self.colorcode['bbthru'] + invert_bbthru_mask.use_custom_color = True + invert_bbthru_mask.invert_rgb = True + tree.links.new(combine_bbthru_ma.outputs['Value'], invert_bbthru_mask.inputs['Color']) + + bb_thru_mask = tree.nodes.new('CompositorNodeMath') + bb_thru_mask.name = 'bb_thru_mask' + bb_thru_mask.label = 'BB Ink Thru Mask' + bb_thru_mask.location = (1150,1115) + bb_thru_mask.color = self.colorcode['bbthru'] + bb_thru_mask.use_custom_color = True + bb_thru_mask.operation = 'MULTIPLY' + bb_thru_mask.use_clamp = True + tree.links.new(thru_in.outputs['Alpha'], bb_thru_mask.inputs[0]) + tree.links.new(invert_bbthru_mask.outputs['Color'], bb_thru_mask.inputs[1]) + + merge_bb_ink_masks = tree.nodes.new('CompositorNodeMath') + merge_bb_ink_masks.name = 'merge_bb_ink_masks' + merge_bb_ink_masks.label = 'Merge BB Ink Masks' + merge_bb_ink_masks.location = (1415, 1215) + merge_bb_ink_masks.color = self.colorcode['bbthru'] + merge_bb_ink_masks.use_custom_color = True + merge_bb_ink_masks.operation = 'ADD' + merge_bb_ink_masks.use_clamp = True + tree.links.new(bb_ink_mask.outputs['Value'], merge_bb_ink_masks.inputs[0]) + tree.links.new(bb_thru_mask.outputs['Value'], merge_bb_ink_masks.inputs[1]) + + tree.links.new(merge_bb_ink_masks.outputs['Value'], overlay_ink.inputs['Fac']) + + composite = tree.nodes.new('CompositorNodeComposite') + composite.name = 'Composite' + composite.label = 'Preview Render' + composite.location = (2050,1215) + composite.color = self.colorcode['output'] + composite.use_custom_color = True + composite.use_alpha = True + composite.inputs['Alpha'].default_value = 1.0 + composite.inputs['Z'].default_value = 1.0 + tree.links.new(overlay_ink.outputs['Image'], composite.inputs['Image']) + + def _cfg_renderlayer(self, rlayer, + includes=False, 
passes=False, excludes=False, + layers=range(20)): + # Utility to set all the includes and passes on or off, initially + + # Weird Includes (we never use these -- always have to turn these on explicitly) + rlayer.use_zmask = False + rlayer.invert_zmask = False + rlayer.use_all_z = False + + # Includes + rlayer.use_solid = includes + rlayer.use_halo = includes + rlayer.use_ztransp = includes + rlayer.use_sky = includes + rlayer.use_edge_enhance = includes + rlayer.use_strand = includes + rlayer.use_freestyle = includes + + # Passes + rlayer.use_pass_combined = passes + rlayer.use_pass_z = passes + rlayer.use_pass_vector = passes + rlayer.use_pass_normal = passes + + rlayer.use_pass_uv = passes + rlayer.use_pass_mist = passes + rlayer.use_pass_object_index = passes + rlayer.use_pass_material_index = passes + rlayer.use_pass_color = passes + + rlayer.use_pass_diffuse = passes + rlayer.use_pass_specular = passes + rlayer.use_pass_shadow = passes + rlayer.use_pass_emit = passes + + rlayer.use_pass_ambient_occlusion = passes + rlayer.use_pass_environment = passes + rlayer.use_pass_indirect = passes + + rlayer.use_pass_reflection = passes + rlayer.use_pass_refraction = passes + + # Exclusions + rlayer.exclude_specular = excludes + rlayer.exclude_shadow = excludes + rlayer.exclude_emit = excludes + rlayer.exclude_ambient_occlusion = excludes + rlayer.exclude_environment = excludes + rlayer.exclude_indirect = excludes + rlayer.exclude_reflection = excludes + rlayer.exclude_refraction = excludes + + for i in range(20): + if i in layers: + rlayer.layers[i] = True + else: + rlayer.layers[i] = False + + + def cfg_paint(self, paint_layer, name="Paint"): + + self._cfg_renderlayer(paint_layer, + includes=True, passes=False, excludes=False, + layers = (0,1,2,3,4, 5,6,7, 10,11,12,13,14)) + + # Includes + if self.sepsky: + paint_layer.use_sky = False + + paint_layer.use_freestyle = False + + # Passes + paint_layer.use_pass_combined = True + paint_layer.use_pass_z = True + 
paint_layer.use_pass_vector = True + paint_layer.use_pass_normal = True + + paint_layer.use_pass_shadow = True + paint_layer.exclude_shadow = True + + paint_layer.use_pass_emit = True + paint_layer.exclude_emit = True + + paint_layer.use_pass_specular = True + paint_layer.exclude_specular = True + + paint_layer.use_pass_reflection = True + paint_layer.exclude_reflection = True + + + def cfg_bbalpha(self, bb_render_layer): + self._cfg_renderlayer(bb_render_layer, + includes=False, passes=False, excludes=False, + layers=(5,6, 14)) + # Includes + bb_render_layer.use_solid = True + bb_render_layer.use_ztransp = True + # Passes + bb_render_layer.use_pass_combined = True + + def cfg_bbmat(self, bb_mat_layer, thru=False): + self._cfg_renderlayer(bb_mat_layer, + includes=False, passes=False, excludes=False, + layers=(0,1,2,3, 5,6,7, 10,11,12,13,14, 15,16)) + # Includes + bb_mat_layer.use_solid = True + bb_mat_layer.use_ztransp = True + + # Passes + bb_mat_layer.use_pass_combined = True + bb_mat_layer.use_pass_material_index = True + + if not thru: + bb_mat_layer.layers[4] = True + + + def cfg_sky(self, sky_render_layer): + self._cfg_renderlayer(sky_render_layer, + includes=False, passes=False, excludes=False, + layers=(0,1,2,3,4, 5,6,7, 10,11,12,13,14)) + # Includes + sky_render_layer.use_sky = True + # Passes + sky_render_layer.use_pass_combined = True + + + def cfg_ink(self, ink_layer, name="Ink", thickness=3, color=(0,0,0)): + self._cfg_renderlayer(ink_layer, + includes=False, passes=False, excludes=False, + layers=(0,1,2,3, 5,6,7, 10,11,12,13, 15,16)) + # Includes + ink_layer.use_freestyle = True + # Passes + ink_layer.use_pass_combined = True + + # Freestyle + ink_layer.freestyle_settings.crease_angle = 2.617944 + ink_layer.freestyle_settings.use_smoothness = True + ink_layer.freestyle_settings.use_culling = True + + if len(ink_layer.freestyle_settings.linesets)>0: + ink_layer.freestyle_settings.linesets[0].name = name + else: + 
ink_layer.freestyle_settings.linesets.new(name) + + lineset = ink_layer.freestyle_settings.linesets[name] + + self.cfg_lineset(lineset, thickness, color) + + # Turn on the transparency layer for the regular ink: + if ink_layer.name!='Ink-Thru': + ink_layer.layers[4] = True + + + def cfg_lineset(self, lineset, thickness=3, color=(0,0,0)): + """ + Configure the lineset. + """ + #lineset.name = 'NormalInk' + # Selection options + lineset.select_by_visibility = True + lineset.select_by_edge_types = True + lineset.select_by_image_border = True + lineset.select_by_face_marks = False + lineset.select_by_group = True + + # Visibility Option + lineset.visibility = 'VISIBLE' + + # Edge Type Options + lineset.edge_type_negation = 'INCLUSIVE' + lineset.edge_type_combination = 'OR' + lineset.select_silhouette = True + lineset.select_border = True + lineset.select_contour = True + lineset.select_crease = True + lineset.select_edge_mark = True + lineset.select_external_contour = True + + # No Freestyle Group (If it exists) + if 'No Freestyle' in bpy.data.groups: + lineset.select_by_group = True + lineset.group = bpy.data.groups['No Freestyle'] + lineset.group_negation = 'EXCLUSIVE' + else: + lineset.select_by_group = False + + # Basic Ink linestyle: + if 'Ink' in bpy.data.linestyles: + lineset.linestyle = bpy.data.linestyles['Ink'] + else: + lineset.linestyle.name = 'Ink' + self.cfg_linestyle(lineset.linestyle, thickness, color) + + + def cfg_linestyle(self, linestyle, thickness=INK_THICKNESS, color=INK_COLOR): + # These are the only changeable parameters: + linestyle.color = color + linestyle.thickness = thickness + + # The rest of this function just sets a common fixed style for "Lunatics!" + linestyle.alpha = 1.0 + linestyle.thickness_position = 'CENTER' + linestyle.use_chaining = True + linestyle.chaining = 'PLAIN' + linestyle.use_same_object = True + linestyle.caps = 'ROUND' + + # ADD THE ALONG-STROKE MODIFIER CURVE + # TODO: try using the .new(type=...) 
idiom to see if it works? + # This probably needs the scene context set? + # bpy.ops.scene.freestyle_thickness_modifier_add(type='ALONG_STROKE') + + linestyle.thickness_modifiers.new(type='ALONG_STROKE', name='taper') + linestyle.thickness_modifiers['taper'].blend = 'MULTIPLY' + linestyle.thickness_modifiers['taper'].mapping = 'CURVE' + + # These are defaults, so maybe unnecessary? + linestyle.thickness_modifiers['taper'].influence = 1.0 + linestyle.thickness_modifiers['taper'].invert = False + linestyle.thickness_modifiers['taper'].value_min = 0.0 + linestyle.thickness_modifiers['taper'].value_max = 1.0 + + # This API is awful, but what it has to do is to change the location of the first two + # points (which can't be removed), then add a third point. Then update to pick up the + # changes: + linestyle.thickness_modifiers['taper'].curve.curves[0].points[0].location = (0.0,0.0) + linestyle.thickness_modifiers['taper'].curve.curves[0].points[1].location = (0.5,1.0) + linestyle.thickness_modifiers['taper'].curve.curves[0].points.new(1.0,0.0) + linestyle.thickness_modifiers['taper'].curve.update() + + \ No newline at end of file diff --git a/scripts/BlenderRemoteDebug.py b/scripts/BlenderRemoteDebug.py new file mode 100644 index 0000000..0eeb456 --- /dev/null +++ b/scripts/BlenderRemoteDebug.py @@ -0,0 +1,13 @@ +#script to run: +SCRIPT="/project/terry/Dev/eclipse-workspace/ABX/abx/abx_ui.py" + +#path to the PyDev folder that contains a file named pydevd.py: +#PYDEVD_PATH='/home/terry/.eclipse/360744294_linux_gtk_x86_64/plugins/org.python.pydev.core_7.3.0.201908161924/pysrc/' +PYDEVD_PATH='/home/terry/.eclipse/360744286_linux_gtk_x86_64/plugins/org.python.pydev.core_8.3.0.202104101217/pysrc/' + + +#PYDEVD_PATH='/home/terry/.config/blender/2.79/scripts/addons/modules/pydev_debug.py' + +import pydev_debug as pydev #pydev_debug.py is in a folder from Blender PYTHONPATH + +pydev.debug(SCRIPT, PYDEVD_PATH, trace = True) diff --git a/scripts/TestInBlender_bpy.py 
b/scripts/TestInBlender_bpy.py new file mode 100644 index 0000000..76d8b7f --- /dev/null +++ b/scripts/TestInBlender_bpy.py @@ -0,0 +1,15 @@ +# +# This testrunner is based on a Stack Overflow answer: +# https://stackoverflow.com/questions/1732438/how-do-i-run-all-python-unit-tests-in-a-directory +# +ABX_PATH = '/project/terry/Dev/Git/abx' + +import os, unittest + +loader = unittest.TestLoader() +start_dir = os.path.join(ABX_PATH, 'tests') +suite = loader.discover(start_dir) + +runner = unittest.TextTestRunner(verbosity=2) +runner.run(suite) + diff --git a/tests/__pycache__/test_accumulate.cpython-35.pyc b/tests/__pycache__/test_accumulate.cpython-35.pyc new file mode 100644 index 0000000..0ab3a36 Binary files /dev/null and b/tests/__pycache__/test_accumulate.cpython-35.pyc differ diff --git a/tests/__pycache__/test_file_context.cpython-35.pyc b/tests/__pycache__/test_file_context.cpython-35.pyc new file mode 100644 index 0000000..d84587b Binary files /dev/null and b/tests/__pycache__/test_file_context.cpython-35.pyc differ diff --git a/tests/__pycache__/test_render_profile.cpython-35.pyc b/tests/__pycache__/test_render_profile.cpython-35.pyc new file mode 100644 index 0000000..723484e Binary files /dev/null and b/tests/__pycache__/test_render_profile.cpython-35.pyc differ diff --git a/tests/test_file_context.py b/tests/test_file_context.py index f9c88ac..7231faf 100644 --- a/tests/test_file_context.py +++ b/tests/test_file_context.py @@ -16,6 +16,51 @@ sys.path.append(os.path.normpath(os.path.join(__file__, '..', '..'))) from abx import file_context +class FileContext_Utilities_Tests(unittest.TestCase): + """ + Test utility functions and classes that FileContext features depend on. 
+ """ + + def test_enum_class_basics(self): + my_enum = file_context.Enum('ZERO', 'ONE', 'TWO', 'THREE') + + self.assertEqual(my_enum.number(my_enum.ZERO), 0) + self.assertEqual(my_enum.number(0), 0) + self.assertEqual(my_enum.number('ZERO'), 0) + + self.assertEqual(my_enum.name(my_enum.ZERO), 'ZERO') + self.assertEqual(my_enum.name(0), 'ZERO') + self.assertEqual(my_enum.name('ZERO'), 'ZERO') + + self.assertEqual(my_enum.ONE, 1) + self.assertEqual(my_enum.name(my_enum.TWO), 'TWO') + self.assertEqual(my_enum.name(2), 'TWO') + self.assertEqual(my_enum.number('THREE'), 3) + + def test_enum_class_blender_enum_options(self): + my_options = file_context.Enum( + ('ZP', 'ZeroPoint', 'Zero Point'), + ('FP', 'FirstPoint', 'First Point'), + ('LP', 'LastPoint', 'Last Point')) + + #print("dir(my_options) = ", dir(my_options)) + + self.assertEqual(my_options.number(my_options.ZP), 0) + self.assertEqual(my_options.number(my_options.FP), 1) + + self.assertEqual(my_options.name(my_options.ZP), 'ZP') + self.assertEqual(my_options.name(1), 'FP') + self.assertEqual(my_options.name('LP'), 'LP') + + self.assertEqual(my_options[my_options.number('FP')], + ('FP', 'FirstPoint', 'First Point')) + + self.assertListEqual(my_options.options, + [('ZP', 'ZeroPoint', 'Zero Point'), + ('FP', 'FirstPoint', 'First Point'), + ('LP', 'LastPoint', 'Last Point')]) + + class FileContext_NameSchema_Interface_Tests(unittest.TestCase): """ Test the interfaces presented by NameSchema. 
@@ -279,6 +324,46 @@ class FileContext_Parser_UnitTests(unittest.TestCase): 'rank': 'sequence'} ) + def test_parsing_filenames_w_fallback_parser(self): + abx_fallback_parser = file_context.NameParsers['abx_fallback']() + + data = abx_fallback_parser('S1E01-SF-4-SoyuzDMInt-cam.blend', None) + self.assertDictEqual(data[1], + {'filetype': 'blend', + 'role': 'cam', + 'comment': None, + 'title': 'S1E01-SF-4-SoyuzDMInt', + 'code': 'S1e01Sf4Soyuzdmint' + }) + + data = abx_fallback_parser('S1E01-SF-4-SoyuzDMInt-cam~~2021-01.blend', None) + self.assertDictEqual(data[1], + {'filetype': 'blend', + 'role': 'cam', + 'comment': '2021-01', + 'title': 'S1E01-SF-4-SoyuzDMInt', + 'code': 'S1e01Sf4Soyuzdmint' + }) + + + data = abx_fallback_parser('S1E02-MM-MediaMontage-compos.blend', None) + self.assertDictEqual(data[1], + {'filetype':'blend', + 'role':'compos', + 'comment': None, + 'title': 'S1E02-MM-MediaMontage', + 'code': 'S1e02MmMediamontage' + }) + + data = abx_fallback_parser('S1E01-PC-PressConference', None) + self.assertDictEqual(data[1], + {'filetype': None, + 'role': None, + 'comment': None, + 'title': 'S1E01-PC-PressConference', + 'code': 'S1e01PcPressconference' + }) + class FileContext_Implementation_UnitTests(unittest.TestCase): TESTDATA = os.path.abspath( @@ -290,6 +375,11 @@ class FileContext_Implementation_UnitTests(unittest.TestCase): def test_filecontext_finds_and_loads_file(self): fc = file_context.FileContext(self.TESTPATH) + +# print('\ntest_filecontext_finds_and_loads_file') +# print(fc.get_log_text('INFO')) +# print(dir(self)) + self.assertEqual(fc.filename, 'A.001-LP-1-BeginningOfEnd-anim.txt') self.assertEqual(fc.root, os.path.join(self.TESTDATA, 'myproject')) self.assertListEqual(fc.folders, @@ -311,6 +401,15 @@ class FileContext_Implementation_UnitTests(unittest.TestCase): self.assertEqual(fc.role, 'anim') self.assertEqual(fc.title, 'BeginningOfEnd') self.assertEqual(fc.comment, None) + + def test_filecontext_abx_fields_include_default(self): + fc0 = 
file_context.FileContext() + fc1 = file_context.FileContext('') + fc2 = file_context.FileContext(self.TESTPATH) + + for fc in (fc0, fc1, fc2): + self.assertIn('render_profiles', fc.abx_fields) + class FileContext_API_UnitTests(unittest.TestCase): @@ -416,21 +515,36 @@ class FileContext_FailOver_Tests(unittest.TestCase): 'yaminimal', 'Episodes', 'Ae1-Void', 'Seq', 'VN-VagueName', 'Ae1-VN-1-VoidOfData-anim.txt') + def test_filecontext_finds_default_yaml(self): + self.assertIn('abx_default', file_context.DEFAULT_YAML) + def test_filecontext_no_project_path(self): fc = file_context.FileContext() + self.assertFalse(fc.file_exists) + self.assertFalse(fc.folder_exists) + self.assertIn('abx_default', fc.provided_data) # What to test? # The main thing is that it doesn't crash. def test_filecontext_failover_empty_project(self): fc = file_context.FileContext(self.TEST_EMPTY_PROJECT) + self.assertFalse(fc.file_exists) + self.assertTrue(fc.folder_exists) + self.assertIn('abx_default', fc.provided_data) def test_filecontext_failover_nonexisting_file(self): fc = file_context.FileContext(self.TEST_NONEXISTENT_PATH) + self.assertFalse(fc.file_exists) + self.assertFalse(fc.folder_exists) + self.assertIn('abx_default', fc.provided_data) def test_filecontext_failover_no_yaml(self): fc = file_context.FileContext(self.TEST_NO_YAML) + self.assertIn('abx_default', fc.provided_data) + # It finds the backstop root YAML in the testdata: + self.assertEqual(fc.root, self.TESTDATA) def test_filecontext_failover_minimal_yaml(self): fc = file_context.FileContext(self.TEST_MINIMAL_YAML) - + self.assertIn('abx_default', fc.provided_data) diff --git a/tests/test_render_profile.py b/tests/test_render_profile.py new file mode 100644 index 0000000..ee9c794 --- /dev/null +++ b/tests/test_render_profile.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +""" +Test the render_profile module. + +This has to be run from within Blender. 
+See: + TestInBlender.py (injector script - call this to run the tests) + TestInBlender_bpy.py (injected test-runner script) +""" + + +import unittest, os, textwrap +import yaml + +import sys +print("__file__ = ", __file__) +sys.path.append(os.path.normpath(os.path.join(__file__, '..', '..'))) + +TESTDATA = os.path.join(os.path.abspath(__file__), '..', 'testdata') + +TESTPATH = os.path.join(TESTDATA, 'myproject', 'Episodes', 'A.001-Pilot', + 'Seq', 'LP-LastPoint', 'A.001-LP-1-BeginningOfEnd-anim.txt') + +import bpy + +import abx +from abx import file_context +from abx import render_profile + +class TestRenderProfile_Utils(unittest.TestCase): + def test_bpy_is_present(self): + self.assertTrue(abx.blender_present) + +class TestRenderProfile_Implementation(unittest.TestCase): + + TESTDATA = os.path.abspath(os.path.join(__file__, '..', 'testdata')) + + TESTPATH = os.path.join(TESTDATA, 'myproject', 'Episodes', 'A.001-Pilot', + 'Seq', 'LP-LastPoint', 'A.001-LP-1-BeginningOfEnd-anim.blend') + + + def setUp(self): + self.fc0 = file_context.FileContext(bpy.data.filepath) + self.fc1 = file_context.FileContext(self.TESTPATH) + self.scene = bpy.context.scene + + def test_blendfile_context(self): + self.assertEqual(self.fc0.filename, None) + self.assertEqual(self.fc1.filename, + 'A.001-LP-1-BeginningOfEnd-anim.blend') + + def test_abx_data_retrieved_defaults(self): + self.assertIn('render_profiles', self.fc0.abx_fields) + + def test_abx_data_retrieved_file(self): + self.assertIn('render_profiles', self.fc1.abx_fields) + + def test_abx_data_default_full_profile_correct(self): + FullProfile = render_profile.RenderProfile( + self.fc0.abx_fields['render_profiles']['full']) + FullProfile.apply(self.scene) + + self.assertEqual(self.scene.render.fps, 30) + self.assertEqual(self.scene.render.fps_base, 1.0) + self.assertTrue(self.scene.render.use_motion_blur) + self.assertTrue(self.scene.render.use_antialiasing) + self.assertEqual(self.scene.render.antialiasing_samples, '8') + 
self.assertEqual(self.scene.render.resolution_percentage, 100) + self.assertEqual(self.scene.render.image_settings.compression, 50) + + + + + \ No newline at end of file