Replaced parsers sub-package with parsers.py

After discovering that sub-packages in Blender add-ons don't work (this
seems to be very broken!), I replaced the parsers sub-package with a
parsers.py module (almost as simple as concatenating the source files in
the package). I may remove this name-parsing system from ABX entirely
soon, possibly moving it into KitCAT.
filmfreedom-org 2021-06-29 13:53:48 -05:00
parent 9db1245ab3
commit 5e24e06796
23 changed files with 1698 additions and 1870 deletions
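
For context, the consolidated parsers.py keeps the same decorator-based registry that the old parsers/__init__.py provided. A minimal sketch of the pattern (the example class below is illustrative, not part of the commit; the real parsers are abx_episode, abx_fallback, and abx_schema):

NameParsers = {}  # Parser registry: name -> parser class

def registered_parser(parser):
    # Decorator that files a parser class under its 'name' attribute.
    NameParsers[parser.name] = parser
    return parser

@registered_parser
class Parser_Example:
    name = 'example'
    def __call__(self, filename, namepath):
        # Every parser returns a (score, fields) pair.
        return 0.0, {}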

View File

@ -12,6 +12,8 @@ bl_info = {
"category": "Object",
}
blender_present = False
try:
# These are protected so we can read the add-on metadata from my

View File

@ -58,7 +58,7 @@ definitions:
abx:
render_profiles:
previz:
name: PreViz,
name: PreViz
desc: 'GL/AVI Previz Render for Animatics'
engine: gl
version: any
@ -66,7 +66,7 @@ abx:
fps_div: 1000
fps_skip: 1
suffix: GL
format: AVI_JPEG
format: AVI
extension: avi
freestyle: False
@ -77,7 +77,7 @@ abx:
fps: 30
fps_skip: 3
suffix: PT
format: AVI_JPEG
format: AVI
extension: avi
freestyle: False,
antialias: False,
@ -90,7 +90,7 @@ abx:
fps: 30
fps_skip: 30
suffix: CH
format: JPEG
format: JPG
extension: jpg
framedigits: 5
freestyle: True
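
A rough sketch of how this YAML block is consumed on the Python side (loading details are illustrative; in ABX the data is read by FileContext, which exposes the 'abx' section's render_profiles mapping):

import os, yaml

with open(os.path.join(os.path.dirname(__file__), 'abx.yaml')) as f:
    data = yaml.safe_load(f)

profiles = data['abx']['render_profiles']
print(profiles['previz']['format'])   # 'AVI' after this change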

View File

@ -29,21 +29,10 @@ import bpy, bpy.utils, bpy.types, bpy.props
from bpy.app.handlers import persistent
from . import file_context
# if bpy.data.filepath:
# BlendfileContext = file_context.FileContext(bpy.data.filepath)
# else:
# BlendfileContext = file_context.FileContext()
#
# abx_data = BlendfileContext.abx_data
from . import copy_anim
from abx import ink_paint
from . import ink_paint
from . import render_profile
#configfile = os.path.join(os.path.dirname(__file__), 'config.yaml')
#print("Configuration file path: ", os.path.abspath(configfile))
# Lunatics Scene Panel
@ -133,94 +122,9 @@ def get_seq_ids(self, context):
seq_enum_items = [(s, s, seq_id_table[series,episode][s]) for s in seq_ids]
return seq_enum_items
# Another hard-coded table -- for render profiles
render_profile_table = {
'previz': {
'name': 'PreViz',
'desc': 'GL/AVI Previz Render for Animatics',
'engine':'gl',
'version':'any',
'fps': 30,
'fps_div': 1000,
'fps_skip': 1,
'suffix': 'GL',
'format': 'AVI',
'freestyle': False
},
'paint6': {
'name': '6fps Paint',
'desc': '6fps Simplified Paint-Only Render',
'engine':'bi',
'fps': 30,
'fps_skip': 5,
'suffix': 'P6',
'format': 'AVI',
'freestyle': False,
'antialias': False,
'motionblur': False
},
'paint3': {
'name': '3fps Paint',
'desc': '3fps Simplified Paint-Only Render',
'engine': 'bi',
'fps': 30,
'fps_skip': 10,
'suffix': 'P3',
'format': 'AVI',
'freestyle': False,
'antialias': False,
'motionblur': False,
},
'paint': {
'name': '30fps Paint',
'desc': '30fps Simplified Paint-Only Render',
'engine': 'bi',
'fps': 30,
'fps_skip': 1,
'suffix': 'PT',
'format': 'AVI',
'freestyle': False,
'antialias': False,
'motionblur': False
},
'check': {
'name': '1fps Check',
'desc': '1fps Full-Features Check Renders',
'engine': 'bi',
'fps': 30,
'fps_skip': 30,
'suffix': 'CH',
'format': 'JPG',
'framedigits': 5,
'freestyle': True,
'antialias': 8
},
'full': {
'name': '30fps Full',
'desc': 'Full Render with all Features Turned On',
'engine': 'bi',
'fps': 30,
'fps_skip': 1,
'suffix': '',
'format': 'PNG',
'framedigits': 5,
'freestyle': True,
'antialias': 8
},
}
class LunaticsSceneProperties(bpy.types.PropertyGroup):
class ProjectProperties(bpy.types.PropertyGroup):
"""
Properties of the current scene.
NOTE: due to be replaced by 'ProjectProperties', using the schema data
retrieved by file_context.
Properties of the scene (and file), based on project context information.
"""
name_context_id = bpy.props.StringProperty(options={'HIDDEN', 'LIBRARY_EDITABLE'})
@ -232,6 +136,101 @@ class LunaticsSceneProperties(bpy.types.PropertyGroup):
name_context = BlendFile.new_name_context()
self.name_context_id = str(id(name_context))
return name_context
render_folder = bpy.props.StringProperty(
name = 'Render Folder',
description = 'Path to the render folder (without filename)',
subtype = 'FILE_PATH')
render_prefix = bpy.props.StringProperty(
name = 'Render Prefix',
description = 'Prefix used to create filenames used in rendering',
subtype = 'FILE_NAME')
designation = bpy.props.StringProperty(
name = 'Designation',
description = 'Short code for this Blender scene only',
maxlen=16)
role = bpy.props.EnumProperty(
name = 'Role',
description = 'Role of this scene in project',
items = (('cam', 'Camera', 'Camera direction and render to EXR'),
('compos', 'Compositing', 'Post-compositing from EXR'),
('anim', 'Animation', 'Character animation scene'),
('mech', 'Mechanical', 'Mech animation scene'),
('asset', 'Asset', 'Project model assets'),
('prop', 'Prop', 'Stage property asset'),
('char', 'Character', 'Character model asset'),
('prac', 'Practical', 'Practical property - rigged prop')),
default='cam')
frame_start = bpy.props.IntProperty(
name = 'Start',
description = "Start frame of shot (used to set the render start frame)",
soft_min = 0, soft_max=10000)
frame_end = bpy.props.IntProperty(
name = 'End',
description = "End frame of shot (used to set the render end frame)",
soft_min = 0, soft_max=10000)
frame_rate = bpy.props.IntProperty(
default = 30,
name = 'FPS',
description = "Frame rate for shot",
soft_max = 30,
min = 1, max = 120)
ink = bpy.props.EnumProperty(
items = (('FS', 'Freestyle', 'Uses Freestyle Ink'),
('EN', 'Edge Node', 'Uses EdgeNode for Ink'),
('FE', 'FS + EN', 'Uses both Freestyle & EdgeNode for Ink'),
('NI', 'No Ink', 'Does not use ink (paint render used for final)'),
('CU', 'Custom', 'Custom setup, do not touch ink settings')),
default = 'CU',
name = 'Ink Type',
description = "Determines how ink will be handled in final shot render")
class ProjectPanel(bpy.types.Panel):
"""
Add a panel to the Properties-Scene screen with Project Settings.
"""
bl_idname = 'SCENE_PT_project'
bl_label = 'Project Properties'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'scene'
def draw(self, context):
pp = bpy.context.scene.project_properties
self.layout.label(text='Project Properties')
row = self.layout.row()
row.prop(pp, 'render_folder')
row = self.layout.row()
row.prop(pp, 'render_prefix')
row.prop(pp, 'designation')
self.layout.label(text='Render Range')
row = self.layout.row()
row.prop(pp, 'frame_start')
row.prop(pp, 'frame_end')
row.prop(pp, 'frame_rate')
self.layout.label(text='Extra')
row = self.layout.row()
row.prop(pp, 'role')
row.prop(pp, 'ink')
# Buttons
class LunaticsSceneProperties(bpy.types.PropertyGroup):
"""
Properties of the current scene.
NOTE: due to be replaced by 'ProjectProperties', using the schema data
retrieved by file_context.
"""
series_id = bpy.props.EnumProperty(
items=[
@ -331,21 +330,7 @@ class LunaticsScenePanel(bpy.types.Panel):
# Buttons
class RenderProfileSettings(bpy.types.PropertyGroup):
"""
Settings for Render Profiles control.
NOTE: currently (0.2.6) uses hard-coded values. Planned to
switch to project-defined values.
"""
render_profile = bpy.props.EnumProperty(
name='Profile',
items=[(k, v['name'], v['desc'])
for k,v in render_profile_table.items()],
description="Select from pre-defined profiles of render settings",
default='full')
class RenderProfilesOperator(bpy.types.Operator):
"""
@ -357,9 +342,9 @@ class RenderProfilesOperator(bpy.types.Operator):
def invoke(self, context, event):
scene = context.scene
profile = render_profile_table[scene.render_profile_settings.render_profile]
profile = scene.render_profile_settings.render_profile
render_profile.set_render_from_profile(scene, profile)
BlendFile.render_profiles.apply(scene, profile)
return {'FINISHED'}
@ -525,13 +510,7 @@ class lunatics_compositing(bpy.types.Operator):
shot.cfg_scene()
return {'FINISHED'}
# def draw(self, context):
# settings = context.scene.lx_compos_settings
# self.col = self.layout.col()
# col.prop(settings, "inkthru", text="Ink Thru")
# col.prop(settings, "billboards", text="Ink Thru")
class LunaticsPanel(bpy.types.Panel):
@ -555,7 +534,17 @@ class LunaticsPanel(bpy.types.Panel):
BlendFile = file_context.FileContext()
class RenderProfileSettings(bpy.types.PropertyGroup):
"""
Settings for Render Profiles control.
"""
render_profile = bpy.props.EnumProperty(
name='Profile',
items=render_profile.blender_enum_lookup,
description="Select from render profiles defined in project")
@persistent
def update_handler(ctxt):
"""
@ -569,6 +558,10 @@ def register():
bpy.types.Scene.lunaprops = bpy.props.PointerProperty(type=LunaticsSceneProperties)
bpy.utils.register_class(LunaticsScenePanel)
bpy.utils.register_class(ProjectProperties)
bpy.types.Scene.project_properties = bpy.props.PointerProperty(type=ProjectProperties)
bpy.utils.register_class(ProjectPanel)
bpy.utils.register_class(RenderProfileSettings)
bpy.types.Scene.render_profile_settings = bpy.props.PointerProperty(
type=RenderProfileSettings)
@ -593,6 +586,8 @@ def unregister():
bpy.utils.unregister_class(LunaticsSceneProperties)
bpy.utils.unregister_class(LunaticsScenePanel)
bpy.utils.unregister_class(ProjectProperties)
bpy.utils.unregister_class(RenderProfileSettings)
bpy.utils.unregister_class(RenderProfilesOperator)
bpy.utils.unregister_class(RenderProfilesPanel)
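
The new ProjectProperties/ProjectPanel pair follows the standard Blender 2.7x pattern: register the PropertyGroup, attach it to Scene through a PointerProperty, and read it back from context in the panel. A condensed sketch (only one property shown, for illustration):

import bpy

class ProjectProperties(bpy.types.PropertyGroup):
    render_prefix = bpy.props.StringProperty(name='Render Prefix')

class ProjectPanel(bpy.types.Panel):
    bl_idname = 'SCENE_PT_project'
    bl_label = 'Project Properties'
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'scene'

    def draw(self, context):
        pp = context.scene.project_properties
        self.layout.row().prop(pp, 'render_prefix')

bpy.utils.register_class(ProjectProperties)
bpy.types.Scene.project_properties = bpy.props.PointerProperty(type=ProjectProperties)
bpy.utils.register_class(ProjectPanel)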

View File

@ -35,7 +35,7 @@ from .accumulate import RecursiveDict
from .enum import Enum
from .ranks import RankNotFound
from .parsers import NameParsers
from abx.parsers import NameParsers
log_level = Enum('DEBUG', 'INFO', 'WARNING', 'ERROR')
@ -43,6 +43,8 @@ log_level = Enum('DEBUG', 'INFO', 'WARNING', 'ERROR')
from .name_schema import FieldSchema
from .name_context import NameContext
#from .render_profile import RenderProfileMap
class FileContext(NameContext):
"""
@ -244,6 +246,7 @@ class FileContext(NameContext):
# Defaults
self.provided_data = RecursiveDict(DEFAULT_YAML, source='default')
self.abx_fields = DEFAULT_YAML['abx']
self.render_profiles = {} #RenderProfileMap()
def clear_notes(self):
"""
@ -294,6 +297,9 @@ class FileContext(NameContext):
# Did we find the YAML data for the project?
# Did we find the project root?
self.render_profiles = self.abx_fields['render_profiles']
#self.render_profiles = RenderProfileMap(self.abx_fields['render_profiles'])
# TODO: Bug?
# Note that 'project_schema' might not be correct if overrides are given.
# As things are, I think it will simply append the overrides, and this
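
As a sketch of how this is used by the add-on (based on the abx_ui.py changes below; the .blend path here is hypothetical): a FileContext built with no arguments holds only the built-in defaults, and update() re-reads project data when a file is loaded or saved.

from abx import file_context

BlendFile = file_context.FileContext()
BlendFile.update('/projects/myshow/Ep01/Ep01-1-compos.blend')  # hypothetical path

# After this commit, render_profiles is the raw dict from the YAML 'abx:' section.
print(list(BlendFile.render_profiles.keys()))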

abx/parsers.py (new file, 490 lines)
View File

@ -0,0 +1,490 @@
# parsers (sub-package)
"""
Filename Parsers & Registry for FileContext.
"""
import re, copy, os
import yaml
NameParsers = {} # Parser registry
def registered_parser(parser):
"""
Decorator function to register a parser class.
"""
NameParsers[parser.name] = parser
return parser
wordre = re.compile(r'([A-Z][a-z]+|[a-z]+|[0-9]+|[A-Z][A-Z]+)')
@registered_parser
class Parser_ABX_Episode:
"""
Original "Lunatics!" filename parsing algorithm. (DEPRECATED)
This parser was written before the Schema parser. It hard-codes the schema used
in the "Lunatics!" Project, and can probably be safely replaced by using the Schema
parser with appropriate YAML settings in the <project>.yaml file, which also allows
much more flexibility in naming schemes.
YAML parameter settings available for this parser:
---
definitions:
parser: abx_episode # Force use of this parser
parser_options: # Available settings (w/ defaults)
field_separator: '-'
episode_separator: 'E'
filetype_separator: '.'
Filetypes and roles are hard-coded, and can't be changed from the YAML.
Assumes field-based filenames of the form:
<series>E<episode>[-<seq>[-<block>[-Cam<camera>][-<shot>]]][-<title>]-<role>.<filetype>
Where the <field> indicates fields with fieldnames, and there are three expected separators:
- is the 'field_separator'
E is the 'episode_separator'
. is the 'filetype_separator'
(These can be overridden in the initialization).
The class is callable, taking a string as input and returning a dictionary of fields.
"""
name = 'abx_episode'
max_score = 10 # Maximum number of fields parsed
# supported values for filetype
filetypes = {
'blend': "Blender File",
'kdenlive': "Kdenlive Video Editor File",
'mlt': "Kdenlive Video Mix Script",
'svg': "Scalable Vector Graphics (Inkscape)",
'kra': "Krita Graphic File",
'xcf': "Gimp Graphic File",
'png': "Portable Network Graphics (PNG) Image",
'jpg': "Joint Photographic Experts Group (JPEG) Image",
'aup': "Audacity Project",
'ardour': "Ardour Project",
'flac': "Free Lossless Audio Codec (FLAC)",
'mp3': "MPEG Audio Layer III (MP3) Audio File",
'ogg': "Ogg Vorbis Audio File",
'avi': "Audio Video Interleave (AVI) Video Container",
'mkv': "Matroska Video Container",
'mp4': "Moving Picture Experts Group (MPEG) 4 Format",
'txt': "Plain Text File"
}
# Roles that make sense in an episode context
roles = {
'extras': "Extras, crowds, auxiliary animated movement",
'mech': "Mechanical animation",
'anim': "Character animation",
'cam': "Camera direction",
'vfx': "Visual special effects",
'compos': "Compositing",
'bkg': "Background 2D image",
'bb': "Billboard 2D image",
'tex': "Texture 2D image",
'foley': "Foley sound",
'voice': "Voice recording",
'fx': "Sound effects",
'music': "Music track",
'cue': "Musical cue",
'amb': "Ambient sound",
'loop': "Ambient sound loop",
'edit': "Video edit"
}
# A few filetypes imply their roles:
roles_by_filetype = {
'kdenlive': 'edit',
'mlt': 'edit'
}
def __init__(self, field_separator='-', episode_separator='E', filetype_separator='.',
fields=None, filetypes=None, roles=None, **kwargs):
if not fields:
fields = {}
if filetypes:
self.filetypes = copy.deepcopy(self.filetypes) # Copy class attribute to instance
self.filetypes.update(filetypes) # Update with new values
if roles:
self.roles = copy.deepcopy(self.roles) # Copy class attribute to instance
self.roles.update(roles) # Update with new values
self.field_separator = field_separator
self.episode_separator = episode_separator
self.filetype_separator = filetype_separator
def __call__(self, filename, namepath):
score = 0.0
fielddata = {}
# Check for filetype ending
i_filetype = filename.rfind(self.filetype_separator)
if i_filetype < 0:
fielddata['filetype'] = None
else:
fielddata['filetype'] = filename[i_filetype+1:]
filename = filename[:i_filetype]
score = score + 1.0
components = filename.split(self.field_separator)
# Check for role marker in last component
if components[-1] in self.roles:
fielddata['role'] = components[-1]
del components[-1]
fielddata['hierarchy'] = 'episode'
score = score + 2.0
elif fielddata['filetype'] in self.roles_by_filetype:
fielddata['role'] = self.roles_by_filetype[fielddata['filetype']]
fielddata['hierarchy'] = 'episode'
else:
fielddata['role'] = None
fielddata['hierarchy'] = None
# Check for a descriptive title (must be 3+ characters in length)
if components and len(components[-1])>2:
# Normalize the title as words with spaces
title = ' '.join(w for w in wordre.split(components[-1]) if wordre.fullmatch(w))
del components[-1]
score = score + 1.0
else:
title = None
# Check if first field contains series/episode number
if components:
prefix = components[0]
try:
fielddata['series'] = {}
fielddata['episode'] = {}
fielddata['series']['code'], episode_id = prefix.split(self.episode_separator)
fielddata['episode']['code'] = int(episode_id)
fielddata['rank'] = 'episode'
del components[0]
score = score + 2.0
except:
pass
# Check for sequence/block/shot/camera designations
if components:
fielddata['seq'] = {}
fielddata['seq']['code'] = components[0]
fielddata['rank'] = 'seq'
del components[0]
score = score + 1.0
if components:
try:
fielddata['block'] = {}
fielddata['block']['code'] = int(components[0])
del components[0]
fielddata['rank'] = 'block'
score = score + 1.0
except:
pass
if components and components[0].startswith('Cam'):
fielddata['camera'] = {}
fielddata['camera']['code'] = components[0][len('Cam'):]
fielddata['rank'] = 'camera'
del components[0]
score = score + 1.0
if components:
# Any remaining structure is joined back to make the shot ID
fielddata['shot'] = {}
fielddata['shot']['code'] = ''.join(components)
fielddata['rank'] = 'shot'
components = None
score = score + 1.0
if title and fielddata['rank'] in fielddata:
fielddata[fielddata['rank']]['title'] = title
return score/self.max_score, fielddata
DEFAULT_YAML = {}
with open(os.path.join(os.path.dirname(__file__), 'abx.yaml')) as def_yaml_file:
DEFAULT_YAML.update(yaml.safe_load(def_yaml_file))
@registered_parser
class Parser_ABX_Fallback(object):
"""
Highly-tolerant parser to fall back on if others fail.
The fallback parser makes only a very minimal and robust set of assumptions.
Any legal filename will successfully return a simple parse, though much
interpretation may be lost. It still allows for common field-based practices,
but falls back on using the unaltered filename if necessary.
YAML options available:
---
definitions:
parser: abx_fallback # Force use of this parser.
There are no other options. Field separators are defined very broadly,
and include most non-word characters (~#$!=+&_-). This was mostly designed
to work without a project schema available.
"""
name = 'abx_fallback'
filetypes = DEFAULT_YAML['definitions']['filetypes']
roles = DEFAULT_YAML['definitions']['roles']
roles_by_filetype = (
DEFAULT_YAML['definitions']['roles_by_filetype'])
main_sep_re = re.compile(r'\W+') # Any single non-word char
comment_sep_re = re.compile(r'[\W_][\W_]+|[~#$!=+&]+')
def __init__(self, **kwargs):
pass
def _parse_ending(self, filename, separator):
try:
remainder, suffix = filename.rsplit(separator, 1)
score = 1.0
except ValueError:
remainder = filename
suffix = None
score = 0.0
return (suffix, remainder, score)
def __call__(self, filename, namepath):
fields = {}
score = 1.0
possible = 4.5
split = filename.rsplit('.', 1)
if len(split)<2 or split[1] not in self.filetypes:
fields['filetype'] = None
remainder = filename
score += 1.0
else:
fields['filetype'] = split[1]
remainder = split[0]
comment_match = self.comment_sep_re.search(remainder)
if comment_match:
fields['comment'] = remainder[comment_match.end():]
remainder = remainder[:comment_match.start()]
else:
fields['comment'] = None
role = self.main_sep_re.split(remainder)[-1]
if role in self.roles:
fields['role'] = role
remainder = remainder[:-1-len(role)]
score += 1.0
else:
fields['role'] = None
# Implied role
if fields['filetype'] in self.roles_by_filetype:
fields['role'] = self.roles_by_filetype[fields['filetype']]
score += 1.0
words = self.main_sep_re.split(remainder)
fields['code'] = ''.join([w.capitalize() for w in words])
fields['title'] = remainder
return score/possible, fields
@registered_parser
class Parser_ABX_Schema(object):
"""
Parser based on using the list of schemas.
The schemas are normally defined in the project root directory YAML.
Expands on the 'abx_episode' parser by allowing all the schema to
be defined by outside configuration data (generally provided in a
project YAML file, but this module does not depend on the data
source used).
The project YAML can additionally control parsing with this parser:
---
definitions:
parser: abx_schema # Force use of this parser
parser_options: # Set parameters
filetype_separator: '.'
comment_separator: '--'
role_separator: '-'
title_separator: '-'
filetypes: # Recognized filetypes.
blend: Blender File # <filetype>: documentation
...
roles: # Recognized role fields.
anim: Character Animation # <role>: documentation
...
roles_by_filetype: # Roles implied by filetype.
kdenlive: edit # <filetype>:<role>
...
(For the full default lists see abx/abx.yaml).
schemas (list): The current schema-list defining how filenames should be parsed.
This "Schema" parser uses this to determine both parsing and
mapping of text fields in the filename.
definitions (dict): The project definitions currently visible to the parser.
"""
name = 'abx_schema'
def __init__(self, schemas=None, definitions=None,
filetype_separator = '.',
comment_separator = '--',
role_separator = '-',
title_separator = '-',
**kwargs):
self.filetype_separator = filetype_separator
self.comment_separator = comment_separator
self.role_separator = role_separator
self.title_separator = title_separator
self.schemas = schemas
if 'roles' in definitions:
self.roles = definitions['roles']
else:
self.roles = []
if 'filetypes' in definitions:
self.filetypes = definitions['filetypes']
else:
self.filetypes = []
if 'roles_by_filetype' in definitions:
self.roles_by_filetype = definitions['roles_by_filetype']
else:
self.roles_by_filetype = []
def _parse_ending(self, filename, separator):
try:
remainder, suffix = filename.rsplit(separator, 1)
score = 1.0
except ValueError:
remainder = filename
suffix = None
score = 0.0
return (suffix, remainder, score)
def _parse_beginning(self, filename, separator):
try:
prefix, remainder = filename.split(separator, 1)
score = 1.0
except ValueError:
prefix = filename
remainder = ''
score = 0.0
return (prefix, remainder, score)
def __call__ (self, filename, namepath, debug=False):
fields = {}
score = 0.0
possible = 0.0
# First get specially-handled extensions
remainder = filename
field, newremainder, s = self._parse_ending(remainder, self.filetype_separator)
if field and field in self.filetypes:
remainder = newremainder
fields['filetype'] = field
score += s*1.0
else:
fields['filetype'] = None
field, remainder, s = self._parse_ending(remainder, self.comment_separator)
fields['comment'] = field
score += s*0.5
field, newremainder, s = self._parse_ending(remainder, self.role_separator)
if field and field in self.roles:
remainder = newremainder
fields['role'] = field
score += s*0.5
else:
fields['role'] = None
field, remainder, s = self._parse_ending(remainder, self.title_separator)
fields['title'] = field
score += s*0.5
possible += 3.0
# Implicit roles
if ( not fields['role'] and
fields['filetype'] and
fields['role'] in self.roles_by_filetype):
self.role = self.roles_by_filetype[fields['filetype']]
score += 0.2
#possible += 0.2
# Figure the rest out from the schema
# Find the matching rank start position for the filename
start = 0
for start, (schema, name) in enumerate(zip(self.schemas, namepath)):
field, r, s = self._parse_beginning(remainder, schema.delimiter)
try:
if field.lower() == schema.format.format(name).lower():
score += 1.0
break
except ValueError:
print(' (365) field, format', field, schema.format)
possible += 1.0
# Starting from that position, try to match fields
# up to the end of the namepath (checking against it)
irank = 0
for irank, (schema, name) in enumerate(
zip(self.schemas[start:], namepath[start:])):
if not remainder: break
field, remainder, s = self._parse_beginning(remainder, schema.delimiter)
score += s
try:
if ( type(field) == str and
field.lower() == schema.format.format(name).lower()):
fields[schema.rank]={'code':field}
fields['rank'] = schema.rank
score += 1.0
except ValueError:
print(' (384) field, format', field, schema.format)
possible += 2.0
# Remaining fields are authoritative (doesn't affect score)
for schema in self.schemas[irank:]:
if not remainder: break
field, remainder, s = self._parse_beginning(remainder, schema.delimiter)
fields[schema.rank]={'code':field}
fields['rank'] = schema.rank
if 'rank' in fields:
fields[fields['rank']]['title'] = fields['title']
if not fields['role'] and fields['filetype'] in self.roles_by_filetype:
fields['role'] = self.roles_by_filetype[fields['filetype']]
return score/possible, fields
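
For reference, a parser is looked up by name in the NameParsers registry, instantiated, and called with a filename and a namepath (which the episode and fallback parsers largely ignore); it returns a (score, fields) pair. A small sketch with an invented filename:

from abx.parsers import NameParsers

parser = NameParsers['abx_episode']()
score, fields = parser('LP01E02-SF-1-Cam1-SomeTitle-anim.blend', namepath=[])

print(score)            # fraction of max_score matched
print(fields['role'])   # 'anim'
print(fields['rank'])   # 'camera' (deepest field recognized here)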

View File

@ -1,16 +0,0 @@
# parsers (sub-package)
"""
Filename Parsers & Registry for FileContext.
"""
NameParsers = {} # Parser registry
def registered_parser(parser):
"""
Decorator function to register a parser class.
"""
NameParsers[parser.name] = parser
return parser
from . import abx_episode, abx_fallback, abx_schema

View File

@ -1,204 +0,0 @@
# abx_episode.py
"""
Custom parser written for "Lunatics!" Project Episode files.
Superseded by 'abx_schema' parser (probably).
"""
import re, copy
from . import registered_parser
wordre = re.compile(r'([A-Z][a-z]+|[a-z]+|[0-9]+|[A-Z][A-Z]+)')
@registered_parser
class Parser_ABX_Episode:
"""
Original "Lunatics!" filename parsing algorithm. (DEPRECATED)
This parser was written before the Schema parser. It hard-codes the schema used
in the "Lunatics!" Project, and can probably be safely replaced by using the Schema
parser with appropriate YAML settings in the <project>.yaml file, which also allows
much more flexibility in naming schemes.
YAML parameter settings available for this parser:
---
definitions:
parser: abx_episode # Force use of this parser
parser_options: # Available settings (w/ defaults)
field_separator: '-'
episode_separator: 'E'
filetype_separator: '.'
Filetypes and roles are hard-code, and can't be changed from the YAML.
Assumes field-based filenames of the form:
<series>E<episode>[-<seq>[-<block>[-Cam<camera>][-<shot>]]][-<title>]-<role>.<filetype>
Where the <field> indicates fields with fieldnames, and there are three expected separators:
- is the 'field_separator'
E is the 'episode_separator'
. is the 'filetype_separator'
(These can be overridden in the initialization).
The class is callable, taking a string as input and returning a dictionary of fields.
"""
name = 'abx_episode'
max_score = 10 # Maximum number of fields parsed
# supported values for filetype
filetypes = {
'blend': "Blender File",
'kdenlive': "Kdenlive Video Editor File",
'mlt': "Kdenlive Video Mix Script",
'svg': "Scalable Vector Graphics (Inkscape)",
'kra': "Krita Graphic File",
'xcf': "Gimp Graphic File",
'png': "Portable Network Graphics (PNG) Image",
'jpg': "Joint Photographic Experts Group (JPEG) Image",
'aup': "Audacity Project",
'ardour': "Ardour Project",
'flac': "Free Lossless Audio Codec (FLAC)",
'mp3': "MPEG Audio Layer III (MP3) Audio File",
'ogg': "Ogg Vorbis Audio File",
'avi': "Audio Video Interleave (AVI) Video Container",
'mkv': "Matroska Video Container",
'mp4': "Moving Picture Experts Group (MPEG) 4 Format}",
'txt': "Plain Text File"
}
# Roles that make sense in an episode context
roles = {
'extras': "Extras, crowds, auxillary animated movement",
'mech': "Mechanical animation",
'anim': "Character animation",
'cam': "Camera direction",
'vfx': "Visual special effects",
'compos': "Compositing",
'bkg': "Background 2D image",
'bb': "Billboard 2D image",
'tex': "Texture 2D image",
'foley': "Foley sound",
'voice': "Voice recording",
'fx': "Sound effects",
'music': "Music track",
'cue': "Musical cue",
'amb': "Ambient sound",
'loop': "Ambient sound loop",
'edit': "Video edit"
}
# A few filetypes imply their roles:
roles_by_filetype = {
'kdenlive': 'edit',
'mlt': 'edit'
}
def __init__(self, field_separator='-', episode_separator='E', filetype_separator='.',
fields=None, filetypes=None, roles=None, **kwargs):
if not fields:
fields = {}
if filetypes:
self.filetypes = copy.deepcopy(self.filetypes) # Copy class attribute to instance
self.filetypes.update(filetypes) # Update with new values
if roles:
self.roles = copy.deepcopy(self.roles) # Copy class attribute to instance
self.roles.update(roles) # Update with new values
self.field_separator = field_separator
self.episode_separator = episode_separator
self.filetype_separator = filetype_separator
def __call__(self, filename, namepath):
score = 0.0
fielddata = {}
# Check for filetype ending
i_filetype = filename.rfind(self.filetype_separator)
if i_filetype < 0:
fielddata['filetype'] = None
else:
fielddata['filetype'] = filename[i_filetype+1:]
filename = filename[:i_filetype]
score = score + 1.0
components = filename.split(self.field_separator)
# Check for role marker in last component
if components[-1] in self.roles:
fielddata['role'] = components[-1]
del components[-1]
fielddata['hierarchy'] = 'episode'
score = score + 2.0
elif fielddata['filetype'] in self.roles_by_filetype:
fielddata['role'] = self.roles_by_filetype[fielddata['filetype']]
fielddata['hierarchy'] = 'episode'
else:
fielddata['role'] = None
fielddata['hierarchy'] = None
# Check for a descriptive title (must be 3+ characters in length)
if components and len(components[-1])>2:
# Normalize the title as words with spaces
title = ' '.join(w for w in wordre.split(components[-1]) if wordre.fullmatch(w))
del components[-1]
score = score + 1.0
else:
title = None
# Check if first field contains series/episode number
if components:
prefix = components[0]
try:
fielddata['series'] = {}
fielddata['episode'] = {}
fielddata['series']['code'], episode_id = prefix.split(self.episode_separator)
fielddata['episode']['code'] = int(episode_id)
fielddata['rank'] = 'episode'
del components[0]
score = score + 2.0
except:
pass
# Check for sequence/block/shot/camera designations
if components:
fielddata['seq'] = {}
fielddata['seq']['code'] = components[0]
fielddata['rank'] = 'seq'
del components[0]
score = score + 1.0
if components:
try:
fielddata['block'] = {}
fielddata['block']['code'] = int(components[0])
del components[0]
fielddata['rank'] = 'block'
score = score + 1.0
except:
pass
if components and components[0].startswith('Cam'):
fielddata['camera'] = {}
fielddata['camera']['code'] = components[0][len('Cam'):]
fielddata['rank'] = 'camera'
del components[0]
score = score + 1.0
if components:
# Any remaining structure is joined back to make the shot ID
fielddata['shot'] = {}
fielddata['shot']['code'] = ''.join(components)
fielddata['rank'] = 'shot'
components = None
score = score + 1.0
if title and fielddata['rank'] in fielddata:
fielddata[fielddata['rank']]['title'] = title
return score/self.max_score, fielddata

View File

@ -1,105 +0,0 @@
# abx_fallback.py
"""
Fallback parser used in case others fail.
The fallback parser makes only a very minimal and robust set of assumptions.
Any legal filename will successfully return a simple parse, though much
interpretation may be lost. It still allows for common field-based practices,
but falls back on using the unaltered filename if necessary.
"""
import re, os
import yaml
from . import registered_parser
DEFAULT_YAML = {}
with open(os.path.join(os.path.dirname(__file__), '..', 'abx.yaml')) as def_yaml_file:
DEFAULT_YAML.update(yaml.safe_load(def_yaml_file))
@registered_parser
class Parser_ABX_Fallback(object):
"""
Highly-tolerant parser to fall back on if others fail.
Makes very minimal assumptions about filename structure.
YAML options available:
---
definitions:
parser: abx_fallback # Force use of this parser.
There are no other options. Field separators are defined very broadly,
and include most non-word characters (~#$!=+&_-). This was mostly designed
to work without a project schema available.
"""
name = 'abx_fallback'
filetypes = DEFAULT_YAML['definitions']['filetypes']
roles = DEFAULT_YAML['definitions']['roles']
roles_by_filetype = (
DEFAULT_YAML['definitions']['roles_by_filetype'])
main_sep_re = re.compile(r'\W+') # Any single non-word char
comment_sep_re = re.compile(r'[\W_][\W_]+|[~#$!=+&]+')
def __init__(self, **kwargs):
pass
def _parse_ending(self, filename, separator):
try:
remainder, suffix = filename.rsplit(separator, 1)
score = 1.0
except ValueError:
remainder = filename
suffix = None
score = 0.0
return (suffix, remainder, score)
def __call__(self, filename, namepath):
fields = {}
score = 1.0
possible = 4.5
split = filename.rsplit('.', 1)
if len(split)<2 or split[1] not in self.filetypes:
fields['filetype'] = None
remainder = filename
score += 1.0
else:
fields['filetype'] = split[1]
remainder = split[0]
comment_match = self.comment_sep_re.search(remainder)
if comment_match:
fields['comment'] = remainder[comment_match.end():]
remainder = remainder[:comment_match.start()]
else:
fields['comment'] = None
role = self.main_sep_re.split(remainder)[-1]
if role in self.roles:
fields['role'] = role
remainder = remainder[:-1-len(role)]
score += 1.0
else:
fields['role'] = None
# Implied role
if fields['filetype'] in self.roles_by_filetype:
fields['role'] = self.roles_by_filetype[fields['filetype']]
score += 1.0
words = self.main_sep_re.split(remainder)
fields['code'] = ''.join([w.capitalize() for w in words])
fields['title'] = remainder
return score/possible, fields

View File

@ -1,189 +0,0 @@
# abx_schema.py
"""
Generalized fields-based parser based on provided schema.
Expands on the 'abx_episode' parser by allowing all the schema to
be defined by outside configuration data (generally provided in a
project YAML file, but this module does not depend on the data
source used).
"""
from . import registered_parser
@registered_parser
class Parser_ABX_Schema(object):
"""
Parser based on using the list of schemas.
The schemas are normally defined in the project root directory YAML.
The project YAML can additionally control parsing with this parser:
---
definitions:
parser: abx_schema # Force use of this parser
parser_options: # Set parameters
filetype_separator: '.'
comment_separator: '--'
role_separator: '-'
title_separator: '-'
filetypes: # Recognized filetypes.
blend: Blender File # <filetype>: documentation
...
roles: # Recognized role fields.
anim: Character Animation # <role>: documentation
...
roles_by_filetype: # Roles implied by filetype.
kdenlive: edit # <filetype>:<role>
...
(For the full default lists see abx/abx.yaml).
schemas (list): The current schema-list defining how filenames should be parsed.
This "Schema" parser uses this to determine both parsing and
mapping of text fields in the filename.
definitions(dict): The project definitions currently visible to the parser.
"""
name = 'abx_schema'
def __init__(self, schemas=None, definitions=None,
filetype_separator = '.',
comment_separator = '--',
role_separator = '-',
title_separator = '-',
**kwargs):
self.filetype_separator = filetype_separator
self.comment_separator = comment_separator
self.role_separator = role_separator
self.title_separator = title_separator
self.schemas = schemas
if 'roles' in definitions:
self.roles = definitions['roles']
else:
self.roles = []
if 'filetypes' in definitions:
self.filetypes = definitions['filetypes']
else:
self.filetypes = []
if 'roles_by_filetype' in definitions:
self.roles_by_filetype = definitions['roles_by_filetype']
else:
self.roles_by_filetype = []
def _parse_ending(self, filename, separator):
try:
remainder, suffix = filename.rsplit(separator, 1)
score = 1.0
except ValueError:
remainder = filename
suffix = None
score = 0.0
return (suffix, remainder, score)
def _parse_beginning(self, filename, separator):
try:
prefix, remainder = filename.split(separator, 1)
score = 1.0
except ValueError:
prefix = filename
remainder = ''
score = 0.0
return (prefix, remainder, score)
def __call__ (self, filename, namepath, debug=False):
fields = {}
score = 0.0
possible = 0.0
# First get specially-handled extensions
remainder = filename
field, newremainder, s = self._parse_ending(remainder, self.filetype_separator)
if field and field in self.filetypes:
remainder = newremainder
fields['filetype'] = field
score += s*1.0
else:
fields['filetype'] = None
field, remainder, s = self._parse_ending(remainder, self.comment_separator)
fields['comment'] = field
score += s*0.5
field, newremainder, s = self._parse_ending(remainder, self.role_separator)
if field and field in self.roles:
remainder = newremainder
fields['role'] = field
score += s*0.5
else:
fields['role'] = None
field, remainder, s = self._parse_ending(remainder, self.title_separator)
fields['title'] = field
score += s*0.5
possible += 3.0
# Implicit roles
if ( not fields['role'] and
fields['filetype'] and
fields['role'] in self.roles_by_filetype):
self.role = self.roles_by_filetype[fields['filetype']]
score += 0.2
#possible += 0.2
# Figure the rest out from the schema
# Find the matching rank start position for the filename
start = 0
for start, (schema, name) in enumerate(zip(self.schemas, namepath)):
field, r, s = self._parse_beginning(remainder, schema.delimiter)
try:
if field.lower() == schema.format.format(name).lower():
score += 1.0
break
except ValueError:
print(' (365) field, format', field, schema.format)
possible += 1.0
# Starting from that position, try to match fields
# up to the end of the namepath (checking against it)
irank = 0
for irank, (schema, name) in enumerate(
zip(self.schemas[start:], namepath[start:])):
if not remainder: break
field, remainder, s = self._parse_beginning(remainder, schema.delimiter)
score += s
try:
if ( type(field) == str and
field.lower() == schema.format.format(name).lower()):
fields[schema.rank]={'code':field}
fields['rank'] = schema.rank
score += 1.0
except ValueError:
print(' (384) field, format', field, schema.format)
possible += 2.0
# Remaining fields are authoritative (doesn't affect score)
for schema in self.schemas[irank:]:
if not remainder: break
field, remainder, s = self._parse_beginning(remainder, schema.delimiter)
fields[schema.rank]={'code':field}
fields['rank'] = schema.rank
if 'rank' in fields:
fields[fields['rank']]['title'] = fields['title']
if not fields['role'] and fields['filetype'] in self.roles_by_filetype:
fields['role'] = self.roles_by_filetype[fields['filetype']]
return score/possible, fields

View File

@ -18,6 +18,8 @@ might forget to set things back up for a final render after I did a previz
animation.
"""
import os
import bpy
import bpy, bpy.types, bpy.utils, bpy.props
@ -25,6 +27,32 @@ from abx import ink_paint
from . import file_context
class RenderProfileMap(dict):
"""
Specialized dictionary mapping render profile names to RenderProfile objects.
"""
def __init__(self, profile_map=None):
self._blender_enum = []
if not profile_map:
profile_map = {}
for key in profile_map:
self[key] = RenderProfile(key, profile_map[key])
for key in self.keys():
self._blender_enum.append((key, self[key].name, self[key].desc))
def keys(self):
return sorted(super().keys())
def blender_enum(self):
return self._blender_enum
def apply(self, scene, key):
self[key].apply(scene)
def blender_enum_lookup(self, context):
from abx import BlendFile
return RenderProfileMap(BlendFile.render_profiles).blender_enum()
class RenderProfile(object):
"""
@ -34,6 +62,12 @@ class RenderProfile(object):
loaded from a project YAML file (under the key 'render_profiles').
Attributes:
name (str):
Drop-down name for profile.
desc (str):
Longer descriptive name used for tooltips in the UI.
engine (str):
Mandatory choice of engine. Some aliases are supported, but the
standard values are: 'gl', meaning a setup for GL viewport
@ -148,13 +182,23 @@ class RenderProfile(object):
}
def __init__(self, fields):
def __init__(self, code, fields):
# Note: Settings w/ value *None* are left unaltered
# That is, they remain whatever they were before
# If a setting isn't included in the fields, then
# the attribute will be *None*.
if 'name' in fields:
self.name = fields['name']
else:
self.name = code
if 'desc' in fields:
self.desc = fields['desc']
else:
self.desc = code
if 'engine' not in fields:
fields['engine'] = None
@ -242,74 +286,17 @@ class RenderProfile(object):
if self.format:
# prefix = scene.name_context.render_path
# prefix = BlendfileContext.name_contexts[scene.name_context].render_path
prefix = 'path_to_render' # We actually need to get this from NameContext
prefix = os.path.join(
scene.project_properties.render_folder,
scene.project_properties.render_prefix)
if self.suffix:
scene.render.filepath = (prefix + '-' + self.suffix + '-' +
'f'+('#'*self.framedigits) + '.' +
self.render_formats[self.format][1])
else:
scene.render.filepath = (prefix + '-f'+('#'*self.framedigits) + '.' +
self.render_formats[self.format][1])
# def set_render_from_profile(scene, profile):
# if 'engine' in profile:
# if profile['engine'] == 'gl':
# pass
# elif profile['engine'] == 'bi':
# scene.render.engine = 'BLENDER_RENDER'
# elif profile['engine'] == 'cycles':
# scene.render.engine = 'CYCLES'
# elif profile['engine'] == 'bge':
# scene.render.engine = 'BLENDER_GAME'
#
# if 'fps' in profile:
# scene.render.fps = profile['fps']
#
# if 'fps_skip' in profile:
# scene.frame_step = profile['fps_skip']
#
# if 'format' in profile:
# scene.render.image_settings.file_format = render_formats[profile['format']][0]
#
# if 'freestyle' in profile:
# scene.render.use_freestyle = profile['freestyle']
#
# if 'antialias' in profile:
# if profile['antialias']:
# scene.render.use_antialiasing = True
# if profile['antialias'] in (5,8,11,16):
# scene.render.antialiasing_samples = str(profile['antialias'])
# else:
# scene.render.use_antialiasing = False
#
# if 'motionblur' in profile:
# if profile['motionblur']:
# scene.render.use_motion_blur = True
# if type(profile['motionblur'])==int:
# scene.render.motion_blur_samples = profile['motionblur']
# else:
# scene.render.use_motion_blur = False
#
# # Use Lunatics naming scheme for render target:
# if 'framedigits' in profile:
# framedigits = profile['framedigits']
# else:
# framedigits = 5
#
# if 'suffix' in profile:
# suffix = profile['suffix']
# else:
# suffix = ''
#
# if 'format' in profile:
# rdr_fmt = render_formats[profile['format']][0]
# ext = render_formats[profile['format']][1]
# else:
# rdr_fmt = 'PNG'
# ext = 'png'
#
# path = ink_paint.LunaticsShot(scene).render_path(
# suffix=suffix, framedigits=framedigits, ext=ext, rdr_fmt=rdr_fmt)
#
# scene.render.filepath = path
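
The filepath logic above boils down to string assembly from the scene's project properties and the profile's suffix, frame digits, and format extension. A sketch with illustrative values:

import os

render_folder, render_prefix = '/render/Ep01', 'Ep01-1'   # from scene.project_properties
suffix, framedigits, extension = 'GL', 5, 'avi'           # from the render profile

prefix = os.path.join(render_folder, render_prefix)
filepath = prefix + '-' + suffix + '-' + 'f' + '#' * framedigits + '.' + extension
print(filepath)   # /render/Ep01/Ep01-1-GL-f#####.avi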

View File

@ -12,6 +12,8 @@ bl_info = {
"category": "Object",
}
blender_present = False
try:
# These are protected so we can read the add-on metadata from my
@ -25,6 +27,8 @@ except ImportError:
if blender_present:
from . import abx_ui
BlendFile = abx_ui.BlendFile
def register():
abx_ui.register()
#bpy.utils.register_module(__name__)

View File

@ -58,7 +58,7 @@ definitions:
abx:
render_profiles:
previz:
name: PreViz,
name: PreViz
desc: 'GL/AVI Previz Render for Animatics'
engine: gl
version: any
@ -66,7 +66,7 @@ abx:
fps_div: 1000
fps_skip: 1
suffix: GL
format: AVI_JPEG
format: AVI
extension: avi
freestyle: False
@ -77,7 +77,7 @@ abx:
fps: 30
fps_skip: 3
suffix: PT
format: AVI_JPEG
format: AVI
extension: avi
freestyle: False,
antialias: False,
@ -90,7 +90,7 @@ abx:
fps: 30
fps_skip: 30
suffix: CH
format: JPEG
format: JPG
extension: jpg
framedigits: 5
freestyle: True

View File

@ -26,25 +26,14 @@ run into.
import os
import bpy, bpy.utils, bpy.types, bpy.props
from bpy.app.handlers import persistent
from . import file_context
# if bpy.data.filepath:
# BlendfileContext = file_context.FileContext(bpy.data.filepath)
# else:
# BlendfileContext = file_context.FileContext()
#
# abx_data = BlendfileContext.abx_data
from . import copy_anim
from . import std_lunatics_ink
from . import ink_paint
from . import render_profile
#configfile = os.path.join(os.path.dirname(__file__), 'config.yaml')
#print("Configuration file path: ", os.path.abspath(configfile))
# Lunatics Scene Panel
# Lunatics file/scene properties:
@ -110,6 +99,11 @@ seq_id_table = {
def get_seq_ids(self, context):
"""
Specific function to retrieve enumerated values for sequence units.
NOTE: due to be replaced by file_context features.
"""
#
# Note: To avoid the reference bug mentioned in the Blender documentation,
# we only return values held in the global seq_id_table, which
@ -128,92 +122,116 @@ def get_seq_ids(self, context):
seq_enum_items = [(s, s, seq_id_table[series,episode][s]) for s in seq_ids]
return seq_enum_items
# Another hard-coded table -- for render profiles
render_profile_table = {
'previz': {
'name': 'PreViz',
'desc': 'GL/AVI Previz Render for Animatics',
'engine':'gl',
'version':'any',
'fps': 30,
'fps_div': 1000,
'fps_skip': 1,
'suffix': 'GL',
'format': 'AVI',
'freestyle': False
},
class ProjectProperties(bpy.types.PropertyGroup):
"""
Properties of the scene (and file), based on project context information.
"""
name_context_id = bpy.props.StringProperty(options={'HIDDEN', 'LIBRARY_EDITABLE'})
'paint6': {
'name': '6fps Paint',
'desc': '6fps Simplified Paint-Only Render',
'engine':'bi',
'fps': 30,
'fps_skip': 5,
'suffix': 'P6',
'format': 'AVI',
'freestyle': False,
'antialias': False,
'motionblur': False
},
'paint3': {
'name': '3fps Paint',
'desc': '3fps Simplified Paint-Only Render',
'engine': 'bi',
'fps': 30,
'fps_skip': 10,
'suffix': 'P3',
'format': 'AVI',
'freestyle': False,
'antialias': False,
'motionblur': False,
},
'paint': {
'name': '30fps Paint',
'desc': '30fps Simplified Paint-Only Render',
'engine': 'bi',
'fps': 30,
'fps_skip': 1,
'suffix': 'PT',
'format': 'AVI',
'freestyle': False,
'antialias': False,
'motionblur': False
},
'check': {
'name': '1fps Check',
'desc': '1fps Full-Features Check Renders',
'engine': 'bi',
'fps': 30,
'fps_skip': 30,
'suffix': 'CH',
'format': 'JPG',
'framedigits': 5,
'freestyle': True,
'antialias': 8
},
'full': {
'name': '30fps Full',
'desc': 'Full Render with all Features Turned On',
'engine': 'bi',
'fps': 30,
'fps_skip': 1,
'suffix': '',
'format': 'PNG',
'framedigits': 5,
'freestyle': True,
'antialias': 8
},
}
@property
def name_context(self):
if self.name_context_id in BlendFile.name_contexts:
return BlendFile.name_contexts[self.name_context_id]
else:
name_context = BlendFile.new_name_context()
self.name_context_id = str(id(name_context))
return name_context
render_folder = bpy.props.StringProperty(
name = 'Render Folder',
description = 'Path to the render folder (without filename)',
subtype = 'FILE_PATH')
render_prefix = bpy.props.StringProperty(
name = 'Render Prefix',
description = 'Prefix used to create filenames used in rendering',
subtype = 'FILE_NAME')
designation = bpy.props.StringProperty(
name = 'Designation',
description = 'Short code for this Blender scene only',
maxlen=16)
role = bpy.props.EnumProperty(
name = 'Role',
description = 'Role of this scene in project',
items = (('cam', 'Camera', 'Camera direction and render to EXR'),
('compos', 'Compositing', 'Post-compositing from EXR'),
('anim', 'Animation', 'Character animation scene'),
('mech', 'Mechanical', 'Mech animation scene'),
('asset', 'Asset', 'Project model assets'),
('prop', 'Prop', 'Stage property asset'),
('char', 'Character', 'Character model asset'),
('prac', 'Practical', 'Practical property - rigged prop')),
default='cam')
frame_start = bpy.props.IntProperty(
name = 'Start',
description = "Start frame of shot (used to set the render start frame)",
soft_min = 0, soft_max=10000)
frame_end = bpy.props.IntProperty(
name = 'End',
description = "End frame of shot (used to set the render end frame)",
soft_min = 0, soft_max=10000)
frame_rate = bpy.props.IntProperty(
default = 30,
name = 'FPS',
description = "Frame rate for shot",
soft_max = 30,
min = 1, max = 120)
ink = bpy.props.EnumProperty(
items = (('FS', 'Freestyle', 'Uses Freestyle Ink'),
('EN', 'Edge Node', 'Uses EdgeNode for Ink'),
('FE', 'FS + EN', 'Uses both Freestyle & EdgeNode for Ink'),
('NI', 'No Ink', 'Does not use ink (paint render used for final)'),
('CU', 'Custom', 'Custom setup, do not touch ink settings')),
default = 'CU',
name = 'Ink Type',
description = "Determines how ink will be handled in final shot render")
class ProjectPanel(bpy.types.Panel):
"""
Add a panel to the Properties-Scene screen with Project Settings.
"""
bl_idname = 'SCENE_PT_project'
bl_label = 'Project Properties'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'scene'
def draw(self, context):
pp = bpy.context.scene.project_properties
self.layout.label(text='Project Properties')
row = self.layout.row()
row.prop(pp, 'render_folder')
row = self.layout.row()
row.prop(pp, 'render_prefix')
row.prop(pp, 'designation')
self.layout.label(text='Render Range')
row = self.layout.row()
row.prop(pp, 'frame_start')
row.prop(pp, 'frame_end')
row.prop(pp, 'frame_rate')
self.layout.label(text='Extra')
row = self.layout.row()
row.prop(pp, 'role')
row.prop(pp, 'ink')
# Buttons
class LunaticsSceneProperties(bpy.types.PropertyGroup):
"""
Properties of the current scene.
NOTE: due to be replaced by 'ProjectProperties', using the schema data
retrieved by file_context.
"""
series_id = bpy.props.EnumProperty(
items=[
('S1', 'S1', 'Series One'),
@ -285,6 +303,8 @@ class LunaticsSceneProperties(bpy.types.PropertyGroup):
class LunaticsScenePanel(bpy.types.Panel):
"""
Add a panel to the Properties-Scene screen
NOTE: To be replaced by 'ProjectPropertiesPanel'.
"""
bl_idname = 'SCENE_PT_lunatics'
bl_label = 'Lunatics Project'
@ -310,18 +330,7 @@ class LunaticsScenePanel(bpy.types.Panel):
# Buttons
class RenderProfileSettings(bpy.types.PropertyGroup):
"""
Settings for Render Profiles control.
"""
render_profile = bpy.props.EnumProperty(
name='Profile',
items=[(k, v['name'], v['desc'])
for k,v in render_profile_table.items()],
description="Select from pre-defined profiles of render settings",
default='full')
class RenderProfilesOperator(bpy.types.Operator):
"""
@ -333,9 +342,9 @@ class RenderProfilesOperator(bpy.types.Operator):
def invoke(self, context, event):
scene = context.scene
profile = render_profile_table[scene.render_profile_settings.render_profile]
profile = scene.render_profile_settings.render_profile
render_profile.set_render_from_profile(scene, profile)
BlendFile.render_profiles.apply(scene, profile)
return {'FINISHED'}
@ -434,6 +443,11 @@ class copy_animation_settings(bpy.types.PropertyGroup):
class CharacterPanel(bpy.types.Panel):
"""
Features for working with characters and armatures.
Currently only includes the CopyAnimation operator.
"""
bl_space_type = "VIEW_3D" # window type panel is displayed in
bl_context = "objectmode"
bl_region_type = "TOOLS" # region of window panel is displayed in
@ -455,7 +469,7 @@ class CharacterPanel(bpy.types.Panel):
class lunatics_compositing_settings(bpy.types.PropertyGroup):
"""
Settings for the LX compositor tool.
Settings for Ink/Paint Config.
"""
inkthru = bpy.props.BoolProperty(
name = "Ink-Thru",
@ -475,7 +489,7 @@ class lunatics_compositing_settings(bpy.types.PropertyGroup):
class lunatics_compositing(bpy.types.Operator):
"""
Set up standard Lunatics scene compositing.
Ink/Paint Config Operator.
"""
bl_idname = "scene.lunatics_compos"
bl_label = "Ink/Paint Config"
@ -488,7 +502,7 @@ class lunatics_compositing(bpy.types.Operator):
"""
scene = context.scene
shot = std_lunatics_ink.LunaticsShot(scene,
shot = ink_paint.LunaticsShot(scene,
inkthru=context.scene.lx_compos_settings.inkthru,
billboards=context.scene.lx_compos_settings.billboards,
sepsky=context.scene.lx_compos_settings.sepsky )
@ -496,16 +510,13 @@ class lunatics_compositing(bpy.types.Operator):
shot.cfg_scene()
return {'FINISHED'}
# def draw(self, context):
# settings = context.scene.lx_compos_settings
# self.col = self.layout.col()
# col.prop(settings, "inkthru", text="Ink Thru")
# col.prop(settings, "billboards", text="Ink Thru")
class LunaticsPanel(bpy.types.Panel):
"""
Ink/Paint Configuration panel.
"""
bl_space_type = "VIEW_3D"
bl_context = "objectmode"
bl_region_type = "TOOLS"
@ -520,6 +531,26 @@ class LunaticsPanel(bpy.types.Panel):
layout.prop(settings, 'inkthru', text="Ink-Thru")
layout.prop(settings, 'billboards', text="Billboards")
layout.prop(settings, 'sepsky', text="Separate Sky")
BlendFile = file_context.FileContext()
class RenderProfileSettings(bpy.types.PropertyGroup):
"""
Settings for Render Profiles control.
"""
render_profile = bpy.props.EnumProperty(
name='Profile',
items=render_profile.blender_enum_lookup,
description="Select from render profiles defined in project")
@persistent
def update_handler(ctxt):
"""
Keeps FileContext up-to-date with Blender file loaded.
"""
BlendFile.update(bpy.data.filepath)
def register():
@ -527,6 +558,10 @@ def register():
bpy.types.Scene.lunaprops = bpy.props.PointerProperty(type=LunaticsSceneProperties)
bpy.utils.register_class(LunaticsScenePanel)
bpy.utils.register_class(ProjectProperties)
bpy.types.Scene.project_properties = bpy.props.PointerProperty(type=ProjectProperties)
bpy.utils.register_class(ProjectPanel)
bpy.utils.register_class(RenderProfileSettings)
bpy.types.Scene.render_profile_settings = bpy.props.PointerProperty(
type=RenderProfileSettings)
@ -543,10 +578,16 @@ def register():
bpy.utils.register_class(lunatics_compositing)
bpy.utils.register_class(LunaticsPanel)
bpy.app.handlers.save_post.append(update_handler)
bpy.app.handlers.load_post.append(update_handler)
bpy.app.handlers.scene_update_post.append(update_handler)
def unregister():
bpy.utils.unregister_class(LunaticsSceneProperties)
bpy.utils.unregister_class(LunaticsScenePanel)
bpy.utils.unregister_class(ProjectProperties)
bpy.utils.unregister_class(RenderProfileSettings)
bpy.utils.unregister_class(RenderProfilesOperator)
bpy.utils.unregister_class(RenderProfilesPanel)
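
Taken together, the render-profile flow after this commit is: the EnumProperty's items callable asks blender_enum_lookup for the profiles defined in the project YAML, and the operator applies the selected key to the scene. A condensed sketch of the intended wiring (the operator idname below is hypothetical, not taken from the commit):

import bpy
from abx import BlendFile, render_profile

class RenderProfileSettings(bpy.types.PropertyGroup):
    # Because items is a callable, the drop-down is rebuilt from the
    # project-defined profiles each time it is drawn.
    render_profile = bpy.props.EnumProperty(
        name='Profile',
        items=render_profile.blender_enum_lookup,
        description="Select from render profiles defined in project")

class RenderProfilesOperator(bpy.types.Operator):
    bl_idname = 'render.apply_render_profile'   # hypothetical idname
    bl_label = 'Apply Render Profile'

    def invoke(self, context, event):
        scene = context.scene
        key = scene.render_profile_settings.render_profile
        # Intended behavior: wrap the raw profile dict and apply the settings.
        render_profile.RenderProfileMap(BlendFile.render_profiles).apply(scene, key)
        return {'FINISHED'}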

View File

@ -102,6 +102,81 @@ import yaml
wordre = re.compile(r'([A-Z]+[a-z]*|[a-z]+|[0-9]+)')
def merge_slices(slices):
"""
Given a list of slice objects, merge them into the minimum list of new slices covering the same elements.
The idea is to catch contiguous or overlapping slices and reduce them to a single slice.
Arguments:
slices (list(slice)): List of slices to be merged.
"""
if isinstance(slices, slice):
slices = [slices]
slices = list(slices)
ordered = sorted(slices, key = lambda a: a.start)
merged = []
while ordered:
s = ordered.pop(0)
while ordered and ordered[0].start <= s.stop:
r = ordered.pop(0)
s = slice(s.start, max(s.stop,r.stop))
merged.append(s)
return tuple(merged)
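# Quick illustration (not part of the module): contiguous or overlapping
# slices collapse into one; disjoint slices are kept separate.
#
#   merge_slices([slice(0, 3), slice(3, 5), slice(7, 9)])
#   -> (slice(0, 5), slice(7, 9))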
def update_slices(old_slices, new):
if isinstance(old_slices, slice):
old_slices = [old_slices]
new_slices = []
for old in old_slices:
if (old.start < new.start <= old.stop) and (new.stop >= old.stop):
# Leading overlap Old: |-----|
# New: |-----|
new_slices.append(slice(old.start, new.start))
elif (old.start <= new.stop < old.stop) and (new.start <= old.start):
# Trailing overlap Old: |-----|
# New: |-----|
new_slices.append(slice(new.stop, old.stop))
elif (new.start <= old.start) and (new.stop >= old.stop):
# Contains Old: |--|
# New: |------|
pass
elif (new.start > old.stop) or (new.stop < old.start):
# No overlap Old: |---|
# New: |---|
new_slices.append(old)
elif (old.start < new.start) and (new.stop < old.stop):
# Split Old: |-------|
# New: |--|
new_slices.append(slice(old.start,new.start))
new_slices.append(slice(new.stop, old.stop))
if len(new_slices)==1:
new_slices = new_slices[0]
elif len(new_slices)==0:
new_slices = None
else:
new_slices = tuple(new_slices)
return new_slices
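# Quick illustration (not part of the module): the old slices are trimmed
# or split so they no longer cover the region claimed by the new slice.
#
#   update_slices(slice(0, 10), slice(4, 6))   -> (slice(0, 4), slice(6, 10))
#   update_slices(slice(0, 10), slice(8, 12))  -> slice(0, 8)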
def listable(val):
"""
Can val be coerced to UnionList?
"""
return ((isinstance(val, collections.abc.Sequence) or
isinstance(val, collections.abc.Set))
and not
(type(val) in (bytes, str)) )
def dictable(val):
"""
Can val be coerced to RecursiveDict?
"""
return isinstance(val, collections.abc.Mapping)
class OrderedSet(collections.abc.Set):
"""
List-based set from Python documentation example.
@ -144,12 +219,130 @@ class UnionList(list):
files, which may or may not contain repetitions for different uses, but
also makes accumulation idempotent (running the union twice will not
increase the size of the result, because no new values will be found).
Attributes:
source: A dictionary mapping source objects to slice objects
according to which union (or original definition) they
come from.
"""
def union(self, other):
def __init__(self, data, source=None, override=True):
self.source = {}
super().__init__(data)
if hasattr(data, 'source') and not override:
self.source = data.source.copy()
if source is not None and None in self.source:
self.source[source] = self.source[None]
del self.source[None]
else:
self.source[source] = slice(0,len(self))
# if source is None and hasattr(data, 'source'):
# self.source = data.source.copy()
# else:
# self.source[source] = slice(0,len(self))
def __repr__(self):
return "UnionList(%s)" % super().__repr__()
def __getitem__(self, query):
if isinstance(query, int) or isinstance(query, slice):
return super().__getitem__(query)
elif isinstance(query, tuple):
result = []
for element in query:
result.extend(super().__getitem__(element))
return result
elif query in self.source:
return self[self.source[query]]
else:
raise ValueError("No source %s, " % repr(query) +
"not a direct int, slice, or tuple of same.")
def union(self, other, source=None):
"""
Returns a combination of the current list with unique new options added.
Arguments:
other (list):
The other list from which new options will be taken.
source (hashable):
A provided object identifying the source of the new
information (can be any type -- will be stored in
the 'source' dictionary, along with the slice to
which it applies).
Returns:
A list with the original options and any unique new options from the
other list. This is intentionally asymmetric behavior, which results
in the union operation being idempotent, retaining the original order,
and emulating the set 'union' behavior, except that non-unique entries
in the original list will be unharmed.
"""
combined = UnionList(self)
combined.source = {}
old_len = len(combined)
# This is the actual union operation
j = old_len
new_elements = []
for element in other:
if element not in self:
combined.append(element)
new_elements.append(element)
combined.extend(new_elements)
combined.source = self.source.copy()
if source is None and hasattr(other, 'source'):
# Other is a UnionList and may have complex source information
for j, element in enumerate(new_elements):
for src in other.source:
if src not in self.source:
combined.source[src] = []
elif isinstance(self.source[src], slice):
combined.source[src] = [self.source[src]]
elif isinstance(self.source[src], tuple):
combined.source[src] = list(self.source[src])
if element in other[other.source[src]]:
combined.source[src].append(slice(old_len,old_len+j+1))
for src in list(combined.source):
combined.source[src] = merge_slices(combined.source[src])
if len(combined.source[src]) == 0:
del combined.source[src]
elif len(combined.source[src]) == 1:
combined.source[src] = combined.source[src][0]
else:
# Source-naive list, only explicitly provided source:
new_slice = slice(old_len, len(combined))
for src in self.source:
upd = update_slices(self.source[src], new_slice)
if upd:
combined.source[src] = upd
if source in self.source:
# If a source is used twice, we have to merge it
# into the existing slices for that source
if isinstance(self.source[source], slice):
new_slices = (self.source[source], new_slice)
elif isinstance(self.source[source], collections.abc.Sequence):
new_slices = self.source[source] + (new_slice,)
new_slices = tuple(merge_slices(new_slices))
if len(new_slices) == 1:
combined.source[source] = new_slices[0]
else:
combined.source[source] = tuple(new_slices)
else:
combined.source[source] = new_slice
return combined
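# A minimal usage sketch (hypothetical values, not from the test data) of the
# asymmetric union with source tracking:
#
#     base = UnionList(['red', 'green'], source='project.yaml')
#     merged = base.union(['green', 'blue'], source='episode.yaml')
#     merged                   # -> UnionList(['red', 'green', 'blue'])
#     merged['episode.yaml']   # -> ['blue'], the items contributed by that source
#     merged.union(['blue'])   # -> unchanged; repeating the union is idempotent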
class RecursiveDict(collections.OrderedDict):
@ -160,34 +353,88 @@ class RecursiveDict(collections.OrderedDict):
as UnionLists and applying the union operation to combine them
(when the replacement value is also a list).
"""
def __init__(self, data=None, source=None, active_source=None):
self.active_source = active_source
self.source = {}
super().__init__()
if isinstance(data, collections.abc.Mapping):
self.update(data, source=source)
def clear(self):
"""
Clear the dictionary to an empty state.
"""
for key in self:
del self[key]
self.source = {}
def update(self, mapping):
for key in mapping:
def update(self, other, source=None):
"""
Load information from another dictionary / mapping object.
other (dict):
The dictionary (or any mapping object) from which the update
is made. It does not matter whether the object is a RecursiveDict
or not; the behavior is the same either way.
source (hashable):
Identifies where the new data comes from. If omitted, the other
dictionary's own source information is used when it has any.
Unlike an ordinary dictionary update, this version works recursively.
If a key exists in both this dictionary and the dictionary from
which the update is being made, and that key is itself a dictionary,
it will be combined in the same way, rather than simply being
overwritten at the top level.
If the shared key represents a list in both dictionaries, then it
will be combined using the list's union operation.
This behavior allows multiple, deeply-nested dictionary objects to
be overlaid one on top of the other in an idempotent way, without
clobbering existing content.
Problems can still arise if a dictionary value is replaced by a
list or a scalar in the update source.
"""
if source is None and hasattr(other, 'source'):
def get_source(key):
return other.source[key]
else:
def get_source(key):
return source
for key in other:
if key in self:
if (isinstance(self[key], collections.abc.Mapping) and
isinstance(mapping[key], collections.abc.Mapping)):
# Subdictionary
newvalue = RecursiveDict(self[key])
newvalue.update(RecursiveDict(mapping[key]))
self[key] = newvalue
elif ((isinstance(self[key], collections.abc.MutableSequence) or
isinstance(self[key], collections.abc.Set)) and
(isinstance(mapping[key], collections.abc.MutableSequence) or
isinstance(mapping[key], collections.abc.Set))):
# Sublist
self[key] = UnionList(self[key]).union(UnionList(mapping[key]))
old = self[key]
new = other[key]
if dictable(old) and dictable(new):
old.update(RecursiveDict(new), source=get_source(key))
elif listable(old) and listable(new):
self.__setitem__(key, old.union(new), source=self.source[key])
#self.__setitem__(key, old.union(UnionList(new)),
# source=self.source[key])
# self.__setitem__(key, old.union(UnionList(new),
# source=get_source(key)),
# source=self.source[key])
else: # scalar
self[key] = mapping[key]
self.__setitem__(key, other[key], source=get_source(key))
else: # new key
self[key] = mapping[key]
self.__setitem__(key, other[key], source=get_source(key))
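# A rough sketch (hypothetical data) of the recursive overlay described above:
#
#     base = RecursiveDict({'previz': {'fps': 30}, 'tags': ['animatic']},
#                          source='project.yaml')
#     base.update({'previz': {'fps_skip': 6}, 'tags': ['animatic', 'ep1']},
#                 source='episode.yaml')
#     base['previz'].get_data()   # -> {'fps': 30, 'fps_skip': 6}, merged not replaced
#     list(base['tags'])          # -> ['animatic', 'ep1'], union with no duplicates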
def copy(self):
copy = RecursiveDict()
for key in self:
copy[key] = self[key]
for key in self.source:
copy.source[key] = self.source[key]
return copy
def get_data(self):
"""
Returns the contents stripped down to an ordinary Python dictionary.
"""
new = {}
for key in self:
if isinstance(self[key], RecursiveDict):
@ -198,13 +445,16 @@ class RecursiveDict(collections.OrderedDict):
new[key]=self[key]
return new
def __setitem__(self, key, value):
if isinstance(value, collections.abc.Mapping):
super().__setitem__(key, RecursiveDict(value))
def __setitem__(self, key, value, source=None):
if not source:
source = self.active_source
elif isinstance(value, collections.abc.MutableSequence):
super().__setitem__(key, UnionList(value))
self.source[key] = source
if dictable(value):
super().__setitem__(key, RecursiveDict(value, source=source))
elif listable(value):
super().__setitem__(key, UnionList(value, source=source, override=False))
else:
super().__setitem__(key,value)
@ -224,19 +474,31 @@ class RecursiveDict(collections.OrderedDict):
s = s + ')'
return s
def from_yaml(self, yaml_string):
self.update(yaml.safe_load(yaml_string))
def from_yaml(self, yaml_string, source=None):
"""
Initialize dictionary from YAML contained in a string.
"""
self.update(yaml.safe_load(yaml_string), source=source)
return self
def from_yaml_file(self, path):
"""
Initialize dictionary from a separate YAML file on disk.
"""
with open(path, 'rt') as yamlfile:
self.update(yaml.safe_load(yamlfile))
self.update(yaml.safe_load(yamlfile), source=path)
return self
def to_yaml(self):
"""
Serialize dictionary contents into a YAML string.
"""
return yaml.dump(self.get_data())
def to_yaml_file(self, path):
"""
Serialize dictionary contents to a YAML file on disk.
"""
with open(path, 'wt') as yamlfile:
yamlfile.write(yaml.dump(self.get_data()))
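# For example, overlaying an inline snippet on top of a file and writing the
# result back out (hypothetical file names):
#
#     d = RecursiveDict().from_yaml_file('project.yaml')
#     d.from_yaml('render_profiles:\n  previz:\n    fps: 30\n', source='inline')
#     d.to_yaml_file('combined.yaml')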
@ -255,11 +517,12 @@ def collect_yaml_files(path, stems, dirmatch=False, sidecar=False, root='/'):
Does not attempt to read or interpret the files.
@path: The starting point, typically the antecedent filename.
@stems: File stem (or sequence of stems) we recognize (in priority order).
@dirmatch: Also search for stems matching the containing directory name?
@sidecar: Also search for stems matching the antecent filename's stem?
@root: Top level directory to consider (do not search above this).
Arguments:
path: The starting point, typically the antecedent filename.
stems: File stem (or sequence of stems) we recognize (in priority order).
dirmatch: Also search for stems matching the containing directory name?
sidecar: Also search for stems matching the antecedent filename's stem?
root: Top level directory to consider (do not search above this).
"Stem" means the name with any extension after "." removed (typically,
the filetype).
@ -294,6 +557,16 @@ def collect_yaml_files(path, stems, dirmatch=False, sidecar=False, root='/'):
def has_project_root(yaml_path):
"""
Does the YAML file contain the 'project_root' key?
Arguments:
yaml_path (str): Filepath to the current YAML file being processed.
Returns:
Whether or not the file contains the 'project_root' key defining its
containing folder as the root folder for this project.
"""
with open(yaml_path, 'rt') as yaml_file:
data = yaml.safe_load(yaml_file)
if 'project_root' in data:
@ -302,12 +575,30 @@ def has_project_root(yaml_path):
return False
def trim_to_project_root(yaml_paths):
"""
Trim the path to the project root location.
Arguments:
yaml_paths (list[str]): The list of YAML file paths.
Returns:
Same list, but with any files above the project root removed.
"""
for i in range(len(yaml_paths)-1,-1,-1):
if has_project_root(yaml_paths[i]):
return yaml_paths[i:]
return yaml_paths
def get_project_root(yaml_paths):
"""
Get the absolute file system path to the root folder.
Arguments:
yaml_paths (list[str]): The list of YAML file paths.
Returns:
The absolute path to the top of the project.
"""
trimmed = trim_to_project_root(yaml_paths)
if trimmed:
return os.path.dirname(trimmed[0])
@ -316,13 +607,32 @@ def get_project_root(yaml_paths):
return '/'
def combine_yaml(yaml_paths):
"""
Merge the listed YAML files into a single dictionary object.
Arguments:
yaml_paths (list[str]): The list of YAML file paths to be combined.
Returns:
A RecursiveDict containing the collected data.
"""
data = RecursiveDict()
for path in yaml_paths:
with open(path, 'rt') as yaml_file:
data.update(yaml.safe_load(yaml_file))
data.update(yaml.safe_load(yaml_file), source=path)
return data
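# Taken together, these helpers form the lookup pipeline that get_project_data()
# below is built on; roughly (hypothetical paths):
#
#     paths = collect_yaml_files('/project/Episodes/E01/Seq/LP/E01-LP-anim.blend',
#                                ('kitcat', 'project'), dirmatch=True, sidecar=True)
#     paths = trim_to_project_root(paths)   # drop anything above 'project_root'
#     root  = get_project_root(paths)       # e.g. '/project'
#     data  = combine_yaml(paths)           # RecursiveDict; sources are the file paths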
def get_project_data(filepath):
"""
Collect the project data from the file system.
Arguments:
filepath (str): Path to the file.
Returns:
Data collected from YAML files going up the
tree to the project root.
"""
# First, get the KitCAT data.
kitcat_paths = collect_yaml_files(filepath,
('kitcat', 'project'), dirmatch=True, sidecar=True)

View File

@ -1,6 +1,28 @@
# copy_anim.py
"""
Blender Python code to copy animation between armatures or proxy armatures.
The purpose of the 'Copy Animation' feature is to allow for animation to be
copied from one armature to another, en masse, rather than having to individually
push and move action objects.
The main use for this is to repair files in which animated proxy rigs have
become incompatible or broken for some reason. Common examples include a name
change in the rig or armature object in a character asset file, extra bones
added, and so on. There is no simple way in Blender to update these proxies.
It is possible to create a new proxy, though, and this tool speeds up the
process by transferring the animation to it all at once.
The tool also allows for the animation to be correctly copied and scaled by
a scale factor, so that animation can be copied from a proxy defined at one
scale to one defined at another.
This comes up when an animation file was built incorrectly at the wrong scale
and needs to be corrected, after animating has already begun.
The scaling feature has been tested on Rigify-based rigs, and resets the
bone constraints as needed during the process.
"""
import bpy, bpy.types, bpy.utils, bpy.props

File diff suppressed because it is too large Load Diff

View File

@ -1,17 +1,160 @@
# render_profile.py
"""
Blender Python code to set parameters based on render profiles.
The purpose of the "Render Profiles" feature is to simplify setting up
Blender to render animation according to a small number of standardized,
named profiles, instead of having to control each setting separately.
They're sort of like predefined radio buttons for your render settings.
I wrote this because I kept having to repeat the same steps to go from
quick "GL" or "Paint" renders at low frame rates to fully-configured
final renders, and I found the process was error-prone.
In particular, it was very easy to accidentally forget to change the render
filepath and have a previous render get overwritten! Or, alternatively, I
might forget to set things back up for a final render after I did a previz
animation.
"""
import os
import bpy
import bpy, bpy.types, bpy.utils, bpy.props
from . import std_lunatics_ink
from abx import ink_paint
from . import file_context
class RenderProfileMap(dict):
"""
Specialized dictionary for mapping Render profile names to profiles.
"""
def __init__(self, profile_map=None):
self._blender_enum = []
if not profile_map:
profile_map = {}
for key in profile_map:
self[key] = RenderProfile(key, profile_map[key])
for key in self.keys():
self._blender_enum.append((key, self[key].name, self[key].desc))
def keys(self):
return sorted(super().keys())
def blender_enum(self):
return self._blender_enum
def apply(self, scene, key):
self[key].apply(scene)
def blender_enum_lookup(self, context):
from abx import BlendFile
return RenderProfileMap(BlendFile.render_profiles).blender_enum()
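# A rough usage sketch (hypothetical profile data; in practice the map is built
# from the 'render_profiles' key of the project YAML):
#
#     profiles = RenderProfileMap({
#         'previz': {'name': 'PreViz', 'desc': 'GL preview', 'engine': 'gl', 'fps': 30},
#     })
#     profiles.blender_enum()               # -> [('previz', 'PreViz', 'GL preview')]
#     profiles.apply(bpy.context.scene, 'previz')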
class RenderProfile(object):
"""
A named set of render settings for Blender.
The profile is designed to be defined by a dictionary of fields, typically
loaded from a project YAML file (under the key 'render_profiles').
Attributes:
name (str):
Drop-down name for profile.
desc (str):
Longer descriptive name used for tooltips in the UI.
engine (str):
Mandatory choice of engine. Some aliases are supported, but the
standard values are: 'gl', meaning a setup for GL viewport
rendering, or one of 'bi'/'BLENDER_INTERNAL', 'cycles'/'CYCLES',
or 'bge'/'BLENDER_GAME' for rendering with the respective
engines. There is no support for Eevee, because this is a 2.7-only
add-on; support should be added when the add-on is ported. No third-party
engines are currently supported.
fps (float):
Frames-per-second.
fps_skip (int):
Frames to skip between rendered frames (effectively divides the
frame rate).
fps_divisor (float):
This is the weird hack for specifying NTSC-compliant fps of 29.97
by using 1.001 as a divisor, instead of 1.0. Avoid if you can!
rendersize (int):
Percentage size of defined pixel dimensions to render. Note that
we don't support setting the pixel size directly. You should
configure that in Blender, but you can use this feature to make
a lower-resolution render.
compress (int):
Compression ratio for image formats that support it.
format (str):
Image or video output format.
One of: 'PNG', 'JPG', 'EXR', 'AVI' or 'MKV'.
Note that we don't support the full range of options, just some
common ones for previz and final rendering.
freestyle (bool):
Whether to turn on Freestyle ink rendering.
antialiasing_samples (str):
Controlled by the 'antialias' key, which can be a number: 5, 8, 11, or 16.
Note that this attribute, which is used to set the value directly
in Blender, is a string, not an integer.
use_antialiasing (bool):
Controlled by 'antialias' key. Whether to turn on antialiasing.
Any value other than 'False' or 'None' will turn it on.
False turns it off. None leaves it as-is.
motion_blur_samples (int):
Controlled by 'motionblur' key, which can be a number determining
the number of samples.
use_motion_blur (bool):
Controlled by 'motionblur' key. Any value other than False or None
will turn on motion blur. A value of True turns it on without
changing the samples. A value of False turns it off. None causes
it to be left as-is.
framedigits (int):
The number of '#' characters to use in the render filename to
indicate frame number. Only used if the format is an image stream.
suffix (str):
A string suffix placed after the base name, but before the frame
number to indicate what profile was used for the render. This
avoids accidentally overwriting renders made with other profiles.
Note that these attributes are not intended to be manipulated directly
by the user. The production designer is expected to define these
profiles in the <project>.yaml file under the 'render_profiles' key,
like this:
render_profiles:
previz:
engine: gl
suffix: MP
fps: 30
fps_skip: 6
motionblur: False
antialias: False
freestyle: False
rendersize: 50
and so on. This is then loaded by ABX into a collection of RenderProfile
objects. Calling a RenderProfile's apply() method actually applies the
settings to the scene.
"""
render_formats = {
# VERY simplified and limited list of formats from Blender that we need:
# <API 'format'>: (<bpy file format>, <filename extension>),
@ -39,13 +182,23 @@ class RenderProfile(object):
}
def __init__(self, fields):
def __init__(self, code, fields):
# Note: Settings w/ value *None* are left unaltered
# That is, they remain whatever they were before
# If a setting isn't included in the fields, then
# the attribute will be *None*.
if 'name' in fields:
self.name = fields['name']
else:
self.name = code
if 'desc' in fields:
self.desc = fields['desc']
else:
self.desc = code
if 'engine' not in fields:
fields['engine'] = None
@ -104,6 +257,9 @@ class RenderProfile(object):
def apply(self, scene):
"""
Apply the profile settings to the given scene.
NOTE: in 0.2.6 this function isn't fully implemented, and the
render filepath will not include the proper unit name.
"""
if self.engine: scene.render.engine = self.engine
if self.fps: scene.render.fps = self.fps
@ -130,74 +286,17 @@ class RenderProfile(object):
if self.format:
# prefix = scene.name_context.render_path
# prefix = BlendfileContext.name_contexts[scene.name_context].render_path
prefix = 'path_to_render' # We actually need to get this from NameContext
prefix = os.path.join(
scene.project_properties.render_folder,
scene.project_properties.render_prefix)
if self.suffix:
scene.render.filepath = (prefix + '-' + self.suffix + '-' +
'f'+('#'*self.framedigits) + '.' +
self.render_formats[self.format][1])
else:
scene.render.filepath = (prefix + '-f'+('#'*self.framedigits) + '.' +
self.render_formats[self.format][1])
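# So, for a hypothetical profile defined with suffix 'GL', format 'AVI', and
# framedigits 5, applying it would leave the scene's render path looking
# roughly like this:
#
#     profile = RenderProfile('previz', {'engine': 'gl', 'fps': 30, 'suffix': 'GL',
#                                        'format': 'AVI', 'framedigits': 5})
#     profile.apply(bpy.context.scene)
#     # scene.render.filepath -> '<render_folder>/<render_prefix>-GL-f#####.avi'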
# def set_render_from_profile(scene, profile):
# if 'engine' in profile:
# if profile['engine'] == 'gl':
# pass
# elif profile['engine'] == 'bi':
# scene.render.engine = 'BLENDER_RENDER'
# elif profile['engine'] == 'cycles':
# scene.render.engine = 'CYCLES'
# elif profile['engine'] == 'bge':
# scene.render.engine = 'BLENDER_GAME'
#
# if 'fps' in profile:
# scene.render.fps = profile['fps']
#
# if 'fps_skip' in profile:
# scene.frame_step = profile['fps_skip']
#
# if 'format' in profile:
# scene.render.image_settings.file_format = render_formats[profile['format']][0]
#
# if 'freestyle' in profile:
# scene.render.use_freestyle = profile['freestyle']
#
# if 'antialias' in profile:
# if profile['antialias']:
# scene.render.use_antialiasing = True
# if profile['antialias'] in (5,8,11,16):
# scene.render.antialiasing_samples = str(profile['antialias'])
# else:
# scene.render.use_antialiasing = False
#
# if 'motionblur' in profile:
# if profile['motionblur']:
# scene.render.use_motion_blur = True
# if type(profile['motionblur'])==int:
# scene.render.motion_blur_samples = profile['motionblur']
# else:
# scene.render.use_motion_blur = False
#
# # Use Lunatics naming scheme for render target:
# if 'framedigits' in profile:
# framedigits = profile['framedigits']
# else:
# framedigits = 5
#
# if 'suffix' in profile:
# suffix = profile['suffix']
# else:
# suffix = ''
#
# if 'format' in profile:
# rdr_fmt = render_formats[profile['format']][0]
# ext = render_formats[profile['format']][1]
# else:
# rdr_fmt = 'PNG'
# ext = 'png'
#
# path = std_lunatics_ink.LunaticsShot(scene).render_path(
# suffix=suffix, framedigits=framedigits, ext=ext, rdr_fmt=rdr_fmt)
#
# scene.render.filepath = path

View File

@ -12,19 +12,19 @@ project_schema:
- rank: dept
type:
graphics: Graphic Art (2D)
models: Models (3D)
graphics: 'Graphic Art (2D)'
models: 'Models (3D)'
sound: Sound Effects
music: Music and Cues
voice: Voice Lines
stock: Assembled Stock Footage Elements
- rank: category
type: string
maxlength: 32
- rank: category
type: string
maxlength: 32
- rank: subcat
type: string
maxlength: 32
- rank: subcat
type: string
maxlength: 32

View File

@ -23,4 +23,59 @@ testdict:
b: 2
- a: 2
b: 3
render_profiles:
previz:
name: PreViz
desc: 'GL/AVI Previz Render for Animatics'
engine: gl
version: any
fps: 24
fps_skip: 1
suffix: GL
format: AVI
extension: avi
freestyle: False
quick:
name: 3fps Paint
desc: '3fps Simplified Paint-Only Render'
engine: bi
fps: 24
fps_skip: 8
suffix: PT
format: AVI
extension: avi
freestyle: False
antialias: False
motionblur: False
check:
name: 1fps Check
desc: '1fps Full-Features Check Renders'
engine: bi
fps: 24
fps_skip: 24
suffix: CH
format: JPG
extension: jpg
framedigits: 5
freestyle: True
antialias: 8
full:
name: 24fps Full
desc: 'Full Render with all Features Turned On'
engine: bi
fps: 24
fps_skip: 1
suffix: ''
format: PNG
extension: png
framedigits: 5
freestyle: True
antialias: 8
motionblur: 2
rendersize: 100
compress: 50

View File

@ -337,13 +337,13 @@ class AccumulationTests(unittest.TestCase):
print("A.get_data() = ", A.get_data())
print("A.source = ", A.source)
self.assertEqual(sorted(list(A.keys())), ['abx', 'testdict', 'testscalar'])
self.assertEqual(sorted(list(A.keys())), ['abx', 'render_profiles', 'testdict', 'testscalar'])
self.assertEqual(sorted(list(A['testdict'].keys())), ['A', 'B', 'C', 'D'])
self.assertEqual(sorted(list(A['testdict']['A'])), ['item1', 'item2', 'item3'])
self.assertDictEqual(A.source,
{'abx':'myproj', 'testdict':'myproj', 'testscalar':'myproj'})
{'abx':'myproj', 'render_profiles':'myproj', 'testdict':'myproj', 'testscalar':'myproj'})
self.assertDictEqual(A['testdict'].source, {
@ -368,13 +368,13 @@ class AccumulationTests(unittest.TestCase):
A.update(B)
self.assertEqual(sorted(list(A.keys())), ['abx', 'testdict', 'testscalar'])
self.assertEqual(sorted(list(A.keys())), ['abx', 'render_profiles', 'testdict', 'testscalar'])
self.assertEqual(sorted(list(A['testdict'].keys())), ['A', 'B', 'C', 'D'])
self.assertEqual(sorted(list(A['testdict']['A'])), ['item1', 'item2', 'item3', 'item4'])
self.assertDictEqual(A.source,
{'abx':'myproj', 'testdict':'myproj', 'testscalar':'pilot'})
{'abx':'myproj', 'render_profiles':'myproj', 'testdict':'myproj', 'testscalar':'pilot'})
self.assertDictEqual(A['testdict'].source, {
'A':'myproj', 'B':'pilot', 'C':'myproj', 'D':'myproj'})

View File

@ -27,13 +27,17 @@ class TestLoadingSchemaHierarchies(unittest.TestCase):
TESTLIBPATH = os.path.join(TESTDATA, 'myproject/Library/' +
'models/props/MyProp-By-me_here-prop.blend')
def test_load_std_schema_from_shotfile(self):
# Probably duplicates test_file_context
fc = file_context.FileContext(self.TESTPATH)
print("\n")
print( fc.schemas)
self.assertEqual(fc.schemas,
None)
def test_not_implemented_yet(self):
print("Library schema override not implemented yet")
self.assertTrue(True)
# def test_load_std_schema_from_shotfile(self):
# # Probably duplicates test_file_context
# fc = file_context.FileContext(self.TESTPATH)
# print("\n")
# print( fc.schemas)
# self.assertEqual(fc.schemas,
# None)

View File

@ -98,24 +98,24 @@ class FileContext_NameSchema_Interface_Tests(unittest.TestCase):
self.assertEqual(schema_chain[5].rank, 'camera')
self.assertEqual(schema_chain[5].codetype[1], ('c2', 'c2', 'c2'))
def test_FieldSchema_Branch_load_from_project_yaml(self):
with open(self.TESTPROJECTYAML, 'rt') as yaml_file:
data = yaml.safe_load(yaml_file)
schema_dicts = data['project_schema']
ranks = [s['rank'] for s in schema_dicts]
branch = ranks_mod.Branch(
ranks_mod.Trunk,
data['project_unit'][-1]['code'],
1,
ranks)
print("\nbranch = ", branch)
print("\nbranch.rank('project') = ", repr(branch.rank('project')))
self.assertTrue(False)
# def test_FieldSchema_Branch_load_from_project_yaml(self):
# with open(self.TESTPROJECTYAML, 'rt') as yaml_file:
# data = yaml.safe_load(yaml_file)
# schema_dicts = data['project_schema']
#
# ranks = [s['rank'] for s in schema_dicts]
#
# branch = ranks_mod.Branch(
# ranks_mod.Trunk,
# data['project_unit'][-1]['code'],
# 1,
# ranks)
#
# print("\nbranch = ", branch)
#
# print("\nbranch.rank('project') = ", repr(branch.rank('project')))
#
# self.assertTrue(False)

View File

@ -51,7 +51,7 @@ class TestRenderProfile_Implementation(unittest.TestCase):
self.assertIn('render_profiles', self.fc1.abx_fields)
def test_abx_data_default_full_profile_correct(self):
FullProfile = render_profile.RenderProfile(
FullProfile = render_profile.RenderProfile('full',
self.fc0.abx_fields['render_profiles']['full'])
FullProfile.apply(self.scene)