Added or updated all the docstrings throughout the project.

filmfreedom-org 2021-05-30 01:48:38 -05:00
parent 361de218ca
commit 6e056fcef7
7 changed files with 787 additions and 32 deletions

View File

@ -111,6 +111,11 @@ seq_id_table = {
def get_seq_ids(self, context):
"""
Specific function to retrieve enumerated values for sequence units.
NOTE: due to be replaced by file_context features.
"""
#
# Note: To avoid the reference bug mentioned in the Blender documentation,
# we only return values held in the global seq_id_table, which
@ -214,6 +219,9 @@ render_profile_table = {
class LunaticsSceneProperties(bpy.types.PropertyGroup):
"""
Properties of the current scene.
NOTE: due to be replaced by 'ProjectProperties', using the schema data
retrieved by file_context.
"""
series_id = bpy.props.EnumProperty(
items=[
@ -286,6 +294,8 @@ class LunaticsSceneProperties(bpy.types.PropertyGroup):
class LunaticsScenePanel(bpy.types.Panel):
"""
Add a panel to the Properties-Scene screen
NOTE: To be replaced by 'ProjectPropertiesPanel'.
"""
bl_idname = 'SCENE_PT_lunatics'
bl_label = 'Lunatics Project'
@ -314,6 +324,9 @@ class LunaticsScenePanel(bpy.types.Panel):
class RenderProfileSettings(bpy.types.PropertyGroup):
"""
Settings for Render Profiles control.
NOTE: currently (0.2.6) uses hard-coded values. Planned to
switch to project-defined values.
"""
render_profile = bpy.props.EnumProperty(
name='Profile',
@ -435,6 +448,11 @@ class copy_animation_settings(bpy.types.PropertyGroup):
class CharacterPanel(bpy.types.Panel):
"""
Features for working with characters and armatures.
Currently only includes the CopyAnimation operator.
"""
bl_space_type = "VIEW_3D" # window type panel is displayed in
bl_context = "objectmode"
bl_region_type = "TOOLS" # region of window panel is displayed in
@ -456,7 +474,7 @@ class CharacterPanel(bpy.types.Panel):
class lunatics_compositing_settings(bpy.types.PropertyGroup):
"""
Settings for the LX compositor tool.
Settings for Ink/Paint Config.
"""
inkthru = bpy.props.BoolProperty(
name = "Ink-Thru",
@ -476,7 +494,7 @@ class lunatics_compositing_settings(bpy.types.PropertyGroup):
class lunatics_compositing(bpy.types.Operator):
"""
Set up standard Lunatics scene compositing.
Ink/Paint Config Operator.
"""
bl_idname = "scene.lunatics_compos"
bl_label = "Ink/Paint Config"
@ -507,6 +525,9 @@ class lunatics_compositing(bpy.types.Operator):
class LunaticsPanel(bpy.types.Panel):
"""
Ink/Paint Configuration panel.
"""
bl_space_type = "VIEW_3D"
bl_context = "objectmode"
bl_region_type = "TOOLS"
@ -527,6 +548,9 @@ BlendFile = file_context.FileContext()
@persistent
def update_handler(ctxt):
"""
Keeps FileContext up-to-date with the currently loaded Blender file.
"""
BlendFile.update(bpy.data.filepath)
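A handler decorated with @persistent only takes effect once it has been appended to one of Blender's handler lists. The registration step isn't shown in this hunk, so the following is only a sketch of how such a handler is typically hooked up in a 2.7x add-on's register() function (BlendFile stands in for the module-level FileContext created above; the import path is assumed):

import bpy
from bpy.app.handlers import persistent
from abx import file_context   # import path assumed from this diff

BlendFile = file_context.FileContext()

@persistent
def update_handler(ctxt):
    # Refresh the project context from the currently loaded .blend file.
    BlendFile.update(bpy.data.filepath)

def register():
    # Re-scan the project context whenever a file is loaded or saved.
    bpy.app.handlers.load_post.append(update_handler)
    bpy.app.handlers.save_post.append(update_handler)

def unregister():
    for handler_list in (bpy.app.handlers.load_post, bpy.app.handlers.save_post):
        if update_handler in handler_list:
            handler_list.remove(update_handler)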

View File

@ -146,6 +146,19 @@ class UnionList(list):
increase the size of the result, because no new values will be found).
"""
def union(self, other):
"""
Returns a combination of the current list with unique new options added.
Arguments:
other (list): The other list from which new options will be taken.
Returns:
A list with the original options and any unique new options from the
other list. This is intentionally asymmetric behavior, which makes
the union operation idempotent, retains the original order, and
emulates the set 'union' behavior, except that non-unique entries
already in the original list are left intact.
"""
combined = UnionList(self)
for element in other:
if element not in self:
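For example, the asymmetry described above plays out like this (class taken from ABX's accumulate module; import path assumed, values hypothetical):

from abx.accumulate import UnionList   # import path assumed from this diff

a = UnionList([1, 2, 2, 3])
b = UnionList([3, 4, 2, 5])

print(a.union(b))   # [1, 2, 2, 3, 4, 5] -- duplicates already in 'a' are kept
print(b.union(a))   # [3, 4, 2, 5, 1]    -- result depends on which list receives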
@ -161,10 +174,38 @@ class RecursiveDict(collections.OrderedDict):
(when the replacement value is also a list).
"""
def clear(self):
"""
Clear the dictionary to an empty state.
"""
for key in self:
del self[key]
def update(self, mapping):
"""
Load information from another dictionary / mapping object.
Arguments:
mapping (dict):
The dictionary (or any mapping object) from which the update
is made. It does not matter if the object is a RecursiveDict
or not, it will result in the same behavior.
Unlike an ordinary dictionary update, this version works recursively.
If a key exists in both this dictionary and the dictionary from
which the update is being made, and that key is itself a dictionary,
it will be combined in the same way, rather than simply being
overwritten at the top level.
If the shared key represents a list in both dictionaries, then it
will be combined using the list's union operation.
This behavior allows multiple, deeply-nested dictionary objects to
be overlaid one on top of the other in an idempotent way, without
clobbering most content.
There are issues that can happen if a dictionary value is replaced
with a list or a scalar in the update source.
"""
for key in mapping:
if key in self:
if (isinstance(self[key], collections.abc.Mapping) and
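A small sketch of the deep merge this describes (import path assumed; data hypothetical):

from abx.accumulate import RecursiveDict   # import path assumed from this diff

base = RecursiveDict()
base.update({'render_profiles': {'previz': {'fps': 30}},
             'roles': ['anim', 'cam']})

# Overlay data from a second (deeper) YAML file in the project tree:
base.update({'render_profiles': {'final': {'fps': 30, 'antialias': 8}},
             'roles': ['cam', 'compos']})

# Nested dictionaries are merged rather than replaced, and lists are
# combined with UnionList.union(), so after both updates:
#   base['render_profiles'] contains both 'previz' and 'final'
#   base['roles'] is ['anim', 'cam', 'compos']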
@ -188,6 +229,9 @@ class RecursiveDict(collections.OrderedDict):
self[key] = mapping[key]
def get_data(self):
"""
Returns the contents stripped down to an ordinary Python dictionary.
"""
new = {}
for key in self:
if isinstance(self[key], RecursiveDict):
@ -225,18 +269,30 @@ class RecursiveDict(collections.OrderedDict):
return s
def from_yaml(self, yaml_string):
"""
Initialize dictionary from YAML contained in a string.
"""
self.update(yaml.safe_load(yaml_string))
return self
def from_yaml_file(self, path):
"""
Initialize dictionary from a separate YAML file on disk.
"""
with open(path, 'rt') as yamlfile:
self.update(yaml.safe_load(yamlfile))
return self
def to_yaml(self):
"""
Serialize dictionary contents into a YAML string.
"""
return yaml.dump(self.get_data())
def to_yaml_file(self, path):
"""
Serialize dictionary contents to a YAML file on disk.
"""
with open(path, 'wt') as yamlfile:
yamlfile.write(yaml.dump(self.get_data()))
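And a minimal round trip through the YAML helpers above (data and file path hypothetical; import path assumed):

from abx.accumulate import RecursiveDict   # import path assumed from this diff

d = RecursiveDict().from_yaml("project_unit:\n  code: A\n  name: Ariel\n")
print(d.to_yaml())                 # re-serialized YAML text
d.to_yaml_file('/tmp/unit.yaml')   # write it out...
d2 = RecursiveDict().from_yaml_file('/tmp/unit.yaml')   # ...and read it back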
@ -255,11 +311,12 @@ def collect_yaml_files(path, stems, dirmatch=False, sidecar=False, root='/'):
Does not attempt to read or interpret the files.
@path: The starting point, typically the antecedent filename.
@stems: File stem (or sequence of stems) we recognize (in priority order).
@dirmatch: Also search for stems matching the containing directory name?
@sidecar: Also search for stems matching the antecent filename's stem?
@root: Top level directory to consider (do not search above this).
Arguments:
path: The starting point, typically the antecedent filename.
stems: File stem (or sequence of stems) we recognize (in priority order).
dirmatch: Also search for stems matching the containing directory name?
sidecar: Also search for stems matching the antecedent filename's stem?
root: Top level directory to consider (do not search above this).
"Stem" means the name with any extension after "." removed (typically,
the filetype).
@ -294,6 +351,16 @@ def collect_yaml_files(path, stems, dirmatch=False, sidecar=False, root='/'):
def has_project_root(yaml_path):
"""
Does the YAML file contain the 'project_root' key?
Arguments:
yaml_path (str): Filepath to the current YAML file being processed.
Returns:
Whether or not the file contains the 'project_root' key defining its
containing folder as the root folder for this project.
"""
with open(yaml_path, 'rt') as yaml_file:
data = yaml.safe_load(yaml_file)
if 'project_root' in data:
@ -302,12 +369,30 @@ def has_project_root(yaml_path):
return False
def trim_to_project_root(yaml_paths):
"""
Trim the list of YAML files down to those within the project root location.
Arguments:
yaml_paths (list[str]): The list of YAML file paths.
Returns:
Same list, but with any files above the project root removed.
"""
for i in range(len(yaml_paths)-1,-1,-1):
if has_project_root(yaml_paths[i]):
return yaml_paths[i:]
return yaml_paths
def get_project_root(yaml_paths):
"""
Get the absolute file system path to the root folder.
Arguments:
yaml_paths (list[str]): The list of YAML file paths.
Returns:
The absolute path to the top of the project.
"""
trimmed = trim_to_project_root(yaml_paths)
if trimmed:
return os.path.dirname(trimmed[0])
@ -316,6 +401,15 @@ def get_project_root(yaml_paths):
return '/'
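For instance, given a hypothetical list of collected YAML paths ordered from the top of the tree downward (import path assumed; the listed files would have to exist on disk, since has_project_root() reads them):

from abx.accumulate import trim_to_project_root, get_project_root   # path assumed

yaml_paths = [
    '/home/user/kitcat.yaml',                       # outside the project
    '/home/user/myproject/myproject.yaml',          # declares 'project_root'
    '/home/user/myproject/Episodes/E01/E01.yaml',
]

# Scanning from the deepest file upward, the first file declaring
# 'project_root' and everything below it is kept:
print(trim_to_project_root(yaml_paths)[0])   # '/home/user/myproject/myproject.yaml'
print(get_project_root(yaml_paths))          # '/home/user/myproject'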
def combine_yaml(yaml_paths):
"""
Merge a list of YAML texts into a single dictionary object.
Arguments:
yaml_paths (list[str]): The list of YAML file paths to be combined.
Returns:
A RecursiveDict containing the collected data.
"""
data = RecursiveDict()
for path in yaml_paths:
with open(path, 'rt') as yaml_file:
@ -323,6 +417,16 @@ def combine_yaml(yaml_paths):
return data
def get_project_data(filepath):
"""
Collect the project data from the file system.
Arguments:
filepath (str): Path to the file.
Returns:
Data collected from YAML files going up the
tree to the project root.
"""
# First, get the KitCAT data.
kitcat_paths = collect_yaml_files(filepath,
('kitcat', 'project'), dirmatch=True, sidecar=True)

View File

@ -1,6 +1,28 @@
# copy_anim.py
"""
Blender Python code to copy animation between armatures or proxy armatures.
The purpose of the 'Copy Animation' feature is to allow for animation to be
copied from one armature to another, en masse, rather than having to
individually push and move action objects.
The main use for this is to repair files in which animated proxy rigs have
become incompatible or broken for some reason. Common examples include a name
change in the rig or armature object in a character asset file, extra bones
added, and so on. There is no simple way in Blender to update these proxies.
It is possible to create a new proxy, though, and with this tool to speed up
the process, the animation can be transferred to it all at once.
The tool also allows for the animation to be correctly copied and scaled by
a scale factor, so that animation can be copied from a proxy defined at one
scale to one defined at another.
This comes up when an animation file was built incorrectly at the wrong scale
and needs to be corrected, after animating has already begun.
The scaling feature has been tested on Rigify-based rigs, and resets the
bone constraints as needed, during the process.
"""
import bpy, bpy.types, bpy.utils, bpy.props

View File

@ -72,7 +72,14 @@ from .accumulate import RecursiveDict
wordre = re.compile(r'([A-Z][a-z]+|[a-z]+|[0-9]+|[A-Z][A-Z]+)')
class Enum(dict):
def __init__(self, *options):
"""
List of options defined in a two-way dictionary.
"""
def __init__(self, *options):
"""
Args:
*options (list): a list of strings to be used as enumerated values.
"""
for i, option in enumerate(options):
if isinstance(option, list) or isinstance(option, tuple):
name = option[0]
@ -87,8 +94,11 @@ class Enum(dict):
@property
def options(self):
"""
This gives the options in a Blender-friendly format, with
tuples of three strings for initializing bpy.props.Enum().
Gives the options in a Blender-friendly format.
Returns:
A list of triples containing the three required fields for
Blender's bpy.props.EnumProperty.
If the Enum was initialized with strings, the options will
contain the same string three times. If initialized with
@ -99,6 +109,15 @@ class Enum(dict):
return [self[i] for i in number_keys]
def name(self, n):
"""
Return the name (str) value of an enum option, whether the name or the number is provided.
Args:
n (str, int): An enum value (either number or string).
Returns:
Returns a string if n is recognized. Returns None if not.
"""
if type(n) is int:
return self[n][0]
elif type(n) is str:
@ -107,6 +126,15 @@ class Enum(dict):
return None
def number(self, n):
"""
Return the number (int) value of an enum option, whether the name or the number is provided.
Args:
n (str, int): An enum value (either number or string).
Returns:
Returns a number if n is recognized. Returns None if not.
"""
if type(n) is str:
return self[n]
elif type(n) is int:
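A usage sketch of this two-way mapping (import path assumed from this diff; option names hypothetical):

from abx.file_context import Enum   # import path assumed

ranks = Enum('series', 'episode', 'seq', 'block', 'shot')

ranks.number('seq')   # 2
ranks.name(2)         # 'seq'
ranks.options         # [('series', 'series', 'series'), ('episode', 'episode', 'episode'), ...]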
@ -304,7 +332,9 @@ class Parser_ABX_Episode:
@registered_parser
class Parser_ABX_Schema(object):
"""
Parser based on using the project_schema defined in the project root directory YAML.
Parser based on using the list of schemas.
The schemas are normally defined in the project root directory YAML.
"""
name = 'abx_schema'
@ -449,8 +479,9 @@ class Parser_ABX_Schema(object):
@registered_parser
class Parser_ABX_Fallback(object):
"""
Highly-tolerant parser to fall back to if the others fail
or can't be used.
Highly-tolerant parser to fall back to if others fail.
Makes very minimal assumptions about filename structure.
"""
name = 'abx_fallback'
@ -519,11 +550,87 @@ class Parser_ABX_Fallback(object):
class RankNotFound(LookupError):
"""
Error returned if an unexpected 'rank' is encountered.
"""
pass
class NameSchema(object):
"""
Represents a schema used for parsing and constructing designations, names, etc.
Represents a schema used for parsing and constructing names.
We need naming information in various formats, based on knowledge about
the role of the Blender file and scene in the project. This object tracks
this information and returns correct names based on it via properties.
Note that NameSchema is NOT an individual project unit name, but a defined
pattern for how names are treated at that level in the project. It is a class
of names, not a name itself. Thus "shot" has a schema, but is distinct from
"shot A" which is a particular "project unit". The job of the schema is
to tell us things like "shots in this project will be represented by
single capital letters".
See NameContext for the characteristics of a particular unit.
Attributes:
codetype (type): Type of code name used for this rank.
Usually it will be int, str, or Enum.
Pre-defined enumerations are available
for uppercase letters (_letters) and
lowercase letters (_lowercase) (Roman --
in principle, other alphabets could be added).
rank (int): Rank of hierarchy under project (which is 0). The
rank value increases as you go "down" the tree.
Sorry about that confusion.
ranks (list(Enum)): List of named ranks known to schema (may include
both higher and lower ranks).
parent (NameSchema|None):
Earlier rank to which this schema is attached.
format (str): Code for formatting with Python str.format() method.
Optional: format can also be specified with the
following settings, or left to default formatting.
pad (str): Padding character.
minlength (int): Minimum character length (0 means it may be empty).
maxlength (int): Maximum character length (0 means no limit).
words (bool): Treat name/title fields like a collection of words,
which can then be represented using "TitleCaps" or
"underscore_spacing", etc for identifier use.
delimiter (str): Field delimiter marking the end of this rank's
code in designations. Note this is the delimiter
after this rank - the higher (lower value) rank
controls the delimiter used before it.
default: The default value for this rank. May be None,
in which case, the rank will be treated as unset
until a setting is made. The UI must provide a
means to restore the unset value. Having no values
set below a certain rank is how a NameContext's
rank is determined.
Note that the rank may go back to a lower value than the schema's
parent object in order to override earlier schemas for a particular
branch in the project (compare this to the use of '..' in operating
system paths). Or it may skip a rank, indicating an
implied intermediate value, which will be treated as having a fixed
value. (I'm not certain I want that, but it would allow us to keep
rank numbers synchronized better in parallel hierarchies in a project).
Note that schemas can be overridden at any level in a project by
'project_schema' directives in unit YAML files, so it is possible to
change the schema behavior locally. By design, only lower levels in
the hierarchy (higher values of rank) can be affected by overrides.
This kind of use isn't fully developed yet, but the plan is to include
things like managing 'Library' assets with a very different structure
from shot files. This way, the project can split into 'Library' and
'Episode' forks with completely different schemas for each.
"""
# Defaults
_default_schema = {
@ -543,6 +650,8 @@ class NameSchema(object):
'block', 'camera', 'shot', 'element')
}
# Really this is more like a set than a dictionary right now, but I
# thought I might refactor to move the definitions into the dictionary:
_codetypes = {
'number':{},
'string':{},
@ -560,6 +669,33 @@ class NameSchema(object):
ranks = ('project',)
def __init__(self, parent=None, rank=None, schema=None, debug=False):
"""
Create a NameSchema from schema data source.
NameSchema is typically initialized based on data from YAML files
within the project. This allows us to avoid encoding project structure
into ABX, leaving how units are named up to the production designer.
If you want our suggestions, you can look at the "Lunatics!" project's
'lunatics.yaml' file, or the 'myproject.yaml' file in the ABX source
distribution.
Arguments:
parent (NameSchema):
The level in the schema hierarchy above this one.
Should be None if this is the top.
rank (int): The rank of this schema to be created.
schema (dict): Data defining the schema, typically loaded from a
YAML file in the project.
debug (bool): Used only for testing. Turns on some verbose output
about internal implementation.
Note that the 'rank' is specified because it may NOT be sequential from
the parent schema.
"""
# Three types of schema data:
# Make sure schema is a copy -- no side effects!
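Since no schema data appears in this diff, here is only a rough sketch of how a chain of NameSchema objects might be built from 'project_schema' data. The constructor keywords come from this diff, but the YAML keys inside each schema entry are assumptions and may not match a real project file:

import yaml
from abx.file_context import NameSchema   # import path assumed

project_schema = yaml.safe_load("""
project_schema:
    - rank: episode
      type: number
      maxlength: 2
      pad: '0'
      delimiter: '-'
    - rank: seq
      type: string
      maxlength: 3
    - rank: shot
      type: letter
      maxlength: 1
""")['project_schema']

# Chain the schemas together, one per rank below 'project' (rank 0):
parent = None
schemas = []
for rank, entry in enumerate(project_schema, start=1):
    parent = NameSchema(parent=parent, rank=rank, schema=entry)
    schemas.append(parent)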
@ -680,10 +816,8 @@ class NameSchema(object):
option = (str(key), str(val), str(val))
self.codetype.append(option)
else:
# If all else fails, just list the string
# If all else fails
self.codetype = None
def __repr__(self):
return('<(%s).NameSchema: %s (%s, %s, %s, (%s))>' % (
@ -700,6 +834,90 @@ class NameSchema(object):
class NameContext(object):
"""
Single naming context within the file (e.g. a Blender scene).
NameContext defines the characteristics of any particular project
unit (taxon) within the project. So, for example, it may represent
the context of an "Episode" or a "Sequence" or a "Shot".
Used in Blender, it will typically be used in two ways: one to represent
the entire file (as the base class for FileContext) and one to represent
a particular Blender scene within the file, which may represent a single
shot, multiple shots with the same camera, or perhaps just an element
of a compositing shot created from multiple Blender scenes.
Examples of all three uses occur in "Lunatics!" episode 1: the
"Press Conference" uses multicam workflow, with scenes for each camera
with multiple shots selected from the timeline, using the VSE; most scenes
are done 'single camera' on a shot-per-scene basis; but some shots use a
'Freestyle camera clipping' technique which puts the line render in a
separate (but linked) scene, while the final shot in the episode combines
three different Blender scenes in a 2D composite, to effect a smooth
transition to the titles.
Attributes:
container (NameContext):
The project unit that contains this one. One step
up the tree, but not necessarily the next step up
in rank (because there can be skipped ranks).
schemas (list(NameSchema)):
The schema list as seen by this unit, taking into
account any schema overrides.
namepath_segment (list):
List of namepath codes defined in this object, not
including the container's namepath. (Implementation)
omit_ranks (dict(str:int)):
How many ranks to omit from the beginning in shortened
names for specific uses. (Implementation).
Probably a mistake this isn't in the NameSchema instead.
fields (dict): The field values used to initialize the NameContext.
May include some details not defined in this attribute
API, and it includes the raw state of the 'name', 'code',
and 'title' fields, determining which are
authoritative -- i.e. fields which aren't specified are
left to 'float', being generated by the related ones.
Thus, name implies title, or title implies name. You
can have one or the other or both, but the other will be
generated from the provided one if it isn't specified.
(Implementation).
code (str|int|Enum):
Identification code for the unit (this replaces 'id' from earlier
versions, because 'id' is a Python built-in used for
object memory identity).
(R/W Property).
namepath (list):
List of codes for project units above this one.
(R/O Property, generated from namepath_segment and
container).
rank (int): Rank of this unit (same as in Schema).
(R/W Property, may affect other attributes).
name (str): Short name for the unit.
(R/W Property, may affect title).
title (str): Full title for the unit.
(R/W Property, may affect name).
designation (str):
Full designation for the unit, including all
the namepath elements, but no title.
fullname (str): The full designation, plus the current unit name.
shortname (str): Abbreviated designation, according to omit_ranks,
with name.
"""
def __init__(self, container, fields=None, namepath_segment=(), ):
@ -887,6 +1105,13 @@ class NameContext(object):
self._compress_name(self.name))
def get_scene_name(self, suffix=''):
"""
Create a name for the current scene, based on namepath.
Arguments:
suffix (str): Optional suffix code used to improve
identifications of scenes.
"""
namebase = self.omit_ranks['scene']*2
desig = ''.join(self._get_name_components()[namebase:])
@ -896,7 +1121,23 @@ class NameContext(object):
return desig
def get_render_path(self, suffix='', framedigits=5, ext='png'):
"""
Create a render filepath, based on namepath and parameters.
Arguments:
suffix (str):
Optional unique code (usually for render profile).
framedigits (int):
How many digits to reserve for frame number.
ext (str):
Filetype extension for the render.
This is meant to be called by render_profile to combine the
namepath-based name with parameters for the specific render, to
uniquely identify movie or image-stream output.
"""
desig = ''.join(self._get_name_components()[self.omit_ranks['render']+1:])
if ext in ('avi', 'mov', 'mp4', 'mkv'):
@ -921,12 +1162,138 @@ class NameContext(object):
class FileContext(NameContext):
"""
Collected information about an object's location on disk: metadata
about filename, directory names, and project, based on expected keywords.
Collected information about a file's storage location on disk.
Collects name and path information from a filepath, used to identify
the file's role in a project. In order to do this correctly, the
FileContext object needs a schema defined for the project, which
explains how to read and parse project file names, to determine what
unit, name, or role they might have in the project.
For this, you will need to have a <project>.yaml file which defines
the 'project_schema' (a list of dictionaries used to initialize a list
of NameSchema objects). Examples of <project>.yaml are provided in the
'myproject.yaml' file in the test data in the source distribution of
ABX, and you can also see a "live" example in the "Lunatics!" project.
Subclassed from NameContext, so please see there for more information.
Attributes:
root (filepath):
The root directory of the project as an absolute operating system
filepath. This should be used for finding the root where it is
currently, not stored for permanent use, as it will be wrong if
the project is relocated.
render_root (filepath):
The root directory for rendering. We often have this symlinked to
a large drive to avoid congestion. Usually just <root>/Renders.
filetype (str):
Filetype code or extension for this file. Usually identifies what
sort of file it is and may imply how it is used in some cases.
role (str):
Explicit definition of file's role in the project, according to
roles specified in <project>.yaml. For a default, see 'abx.yaml'
in the ABX source code. Derived from the file name.
title (str):
Title derived from the filename.
The relationship between this and the NameContext title is unclear
at present -- probably we should be setting the NameContext.title
property from here (?)
comment (str):
Comment field from the filename. This is a free field generally
occurring after the role, using a special delimiter and meant to
be readable by humans. It may indicate an informal backup or
saved version of the file outside of the VCS, as opposed to
a main, VCS-tracked copy. Or it may indicate some variant version
of the file.
name_contexts (list[NameContext]):
A list of NameContext objects contained in this file, typically
one-per-scene in a Blender file.
filepath (str):
O/S and location dependent absolute path to the file.
filename (str):
Unaltered filename from disk.
file_exists (bool):
Does the file exist on disk (yet)?
This may be false if the filename has been determined inside
the application, but the file has not been written to disk yet.
folder_exists (bool):
Does the containing folder exist (yet)?
folders (list(str)):
List of folder names from the project root to the current file,
forming a relative path from the root to this file.
omit_ranks (dict[str:int]):
How many ranks are omitted from the beginning of filename
fields? (Implementation).
provided_data (RecursiveDict):
The pile of data from project YAML files. This is a special
dictionary object that does "deep updates" in which sub-dictionaries
and sub-lists are updated recursively rather than simply being
replaced at the top level. This allows the provided_data to
accumulate information as it looks up the project tree to the
project root. It is not recommended to directly access this data.
(Implementation)
abx_fields (RecursiveDict):
Data accumulated from 'abx.yaml' files, with directives affecting how ABX
should behave with this file. This can be used to set custom behavior in
different project units. For example, we use it to define different
render profiles for different project units.
notes (list(str)):
A primitive logging facility. This stores warning and information
messages about the discovery process to aid the production designer
in setting up the project correctly.
NOTE that the clear method does not clear the notes! There is a
separate clear_notes() method.
parsers (list):
A list of registered parser implementations for analyzing file
names. FileContext tries them all, and picks the parser which
reports the best score -- that is, parsers score themselves on
how likely their parse is to be correct. So if a parser hits a
problem, it demerits its score, allowing another parser to take
over.
Currently there are only three parsers provided: a custom one,
originally written to be specific to "Lunatics!" episodes
('abx_episode', now obsolete?), a parser using the project_schema
system ('abx_schema', now the preferred choice), and a "dumb"
parser designed to fall back on if no schema is provided, which reads
only the filetype and possible role, title, and comment fields,
guessing from common usage with no explicit schema
('abx_fallback').
This implementation could probably benefit from some more application of
computer science and artificial intelligence, but I've settled on a
"good enough" solution and the assumption that production designers would
probably rather just learn how to use the YAML schemas correctly, than
to try to second-guess a sloppy AI system.
As of v0.2.6, FileContext does NOT support getting any information
directly from the operating system path for the file (i.e. by reading
directory names), although this would seem to be a good idea.
Therefore, project units have to be specified by additional unit-level
YAML documents (these can be quite small), explicitly setting the
unit-level information for directories above the current object, and
by inference from the project schema and the filename (which on "Lunatics!"
conveys all the necessary information for shot files, but perhaps not
for library asset files).
"""
# hierarchies = ()
# hierarchy = None
#schema = None
# IMMUTABLE DEFAULTS:
filepath = None
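A hedged usage sketch tying the attributes above together (that the constructor takes a file path is inferred from the update(path) call below; paths and project layout are hypothetical, import path assumed):

from abx.file_context import FileContext   # import path assumed

# A hypothetical shot file in a project whose <project>.yaml defines
# 'project_root' and 'project_schema':
fc = FileContext('/project/Episodes/E01/Seq-A/E01-A-1-anim.blend')

print(fc.root)        # absolute path of the project root
print(fc.filetype)    # 'blend'
print(fc.role)        # e.g. 'anim', if the schema/filename yields it
print(fc.get_log_text('WARNING'))   # any warnings logged during discovery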
@ -959,6 +1326,12 @@ class FileContext(NameContext):
self.update(path)
def clear(self):
"""
Clear the contents of the FileContext object.
Nearly the same as reinitializing, but the notes
attribute is left alone, to preserve the log history.
"""
NameContext.clear(self)
# Identity
@ -989,11 +1362,17 @@ class FileContext(NameContext):
self.abx_fields = DEFAULT_YAML['abx']
def clear_notes(self):
"""
Clear the log history in the notes attribute.
"""
# We use this for logging, so it doesn't get cleared by the
# normal clear process.
self.notes = []
def update(self, path):
"""
Update the FileContext based on a new file path.
"""
# Basic File Path Info
self.filepath = os.path.abspath(path)
self.filename = os.path.basename(path)
@ -1107,11 +1486,20 @@ class FileContext(NameContext):
return s
def log(self, level, msg):
"""
Log a message to the notes attribute.
This is a simple facility for tracking issues with the production
source tree layout, schemas, and file contexts.
"""
if type(level) is str:
level = log_level.index(level)
self.notes.append((level, msg))
def get_log_text(self, level=log_level.INFO):
"""
Returns the notes attribute as a block of text.
"""
level = log_level.number(level)
return '\n'.join([
': '.join((log_level.name(note[0]), note[1]))
@ -1148,6 +1536,9 @@ class FileContext(NameContext):
@property
def filetype(self):
"""
Filetype suffix for the file (usually identifies format).
"""
if 'filetype' in self.fields:
return self.fields['filetype']
else:
@ -1159,6 +1550,9 @@ class FileContext(NameContext):
@property
def role(self):
"""
Role field from the filename, or guessed from filetype.
"""
if 'role' in self.fields:
return self.fields['role']
else:
@ -1170,6 +1564,9 @@ class FileContext(NameContext):
@property
def title(self):
"""
Title field parsed from the file name.
"""
if 'title' in self.fields:
return self.fields['title']
else:
@ -1181,6 +1578,12 @@ class FileContext(NameContext):
@property
def comment(self):
"""
Comment field parsed from the filename.
Meant to be a human-readable extension to the filename, often used to
represent an informal version, date, or variation on the file.
"""
if 'comment' in self.fields:
return self.fields['comment']
else:
@ -1192,6 +1595,9 @@ class FileContext(NameContext):
@classmethod
def deref_implications(cls, values, matchfields):
"""
NOT USED: Interpret information from reading folder names.
"""
subvalues = {}
for key in values:
# TODO: is it safe to use type tests here instead of duck tests?
@ -1206,6 +1612,9 @@ class FileContext(NameContext):
return subvalues
def get_path_implications(self, path):
"""
NOT USED: Extract information from folder names.
"""
data = {}
prefix = r'(?:.*/)?'
suffix = r'(?:/.*)?'
@ -1217,9 +1626,10 @@ class FileContext(NameContext):
def new_name_context(self, rank=None, **kwargs):
"""
Get a subunit from the current file.
Any rank in the hierarchy may be specified, though element, shot,
camera, and block are most likely.
Get a NameContext object representing a portion of this file.
In Blender, generally in a 1:1 relationship with locally-defined
scenes.
"""
fields = {}
fields.update(self.fields)

View File

@ -1,7 +1,37 @@
# std_lunatics_ink.py
# ink_paint.py
"""
Functions to set up the standard ink and paint compositing arrangement
for "Lunatics"
Standard Ink & Paint compositing, as used in "Lunatics!"
The purpose of the "Ink/Paint Config" feature is to simplify setting up
the render layers and compositing nodes for EXR Ink/Paint configuration,
including the most common settings in our project.
I'm not making any attempt to generalize this feature, since that would
complicate the interface and defeat the purpose. Nor can I think of any
clean way to define this functionality in the project YAML code, without
it becoming truly byzantine. So it's set up like it is, and if you like
"Lunatics!" style, you may find it useful. Otherwise, you'll have to
write your own Add-on.
It does support a few common variations:
* Optional 'Ink-Thru' feature, allowing Freestyle lines behind
'transparent' objects to be seen. 'Transparent', in this
context, means the objects are on scene layer 4; it has nothing to do
with any material settings.
* Optional 'Billboard' feature, allowing correct masking of Freestyle
ink lines around pre-rendered "billboard" objects, using transparent
texture maps. That is, background lines are drawn up to where the
visible part of the billboard would obscure them.
* Optional 'Separate Sky' compositing. This is a work-around to allow
sky backgrounds to be composited correctly without being shadowed by
the separate shadow layer. Basically a work-around for a design flaw
in the BLENDER_INTERNAL renderer, when combined with compositing.
Currently (0.2.6), it only supports setups for 'cam' file rendering. It does
not set up post-compositing files. This feature is on my road map.
"""
import os
@ -21,6 +51,10 @@ THRU_INK_COLOR = (20,100,50)
class LunaticsShot(object):
"""
General class for Lunatics Blender Scene data.
So far in 0.2.6, this duplicates a lot of functionality that file_context is
supposed to provide, with hard-coded naming methods. My plan is to strip
all that out and replace it with calls to the appropriate NameContext object.
"""
colorcode = {
'paint': (1.00, 1.00, 1.00),
@ -86,6 +120,9 @@ class LunaticsShot(object):
return path
def cfg_scene(self, scene=None, thru=True, exr=True, multicam=False, role='shot'):
"""
Configure the Blender scene for Ink/Paint.
"""
if not scene:
scene = self.scene
@ -145,6 +182,9 @@ class LunaticsShot(object):
return rlayer_in
def cfg_nodes(self, scene):
"""
Configure the compositing nodes.
"""
# Create Compositing Node Tree
scene.use_nodes = True
tree = scene.node_tree
@ -504,7 +544,9 @@ class LunaticsShot(object):
def cfg_paint(self, paint_layer, name="Paint"):
"""
Configure the 'Paint' render layer.
"""
self._cfg_renderlayer(paint_layer,
includes=True, passes=False, excludes=False,
layers = (0,1,2,3,4, 5,6,7, 10,11,12,13,14))
@ -535,6 +577,9 @@ class LunaticsShot(object):
def cfg_bbalpha(self, bb_render_layer):
"""
Configure the 'BB Alpha' render layer for billboards.
"""
self._cfg_renderlayer(bb_render_layer,
includes=False, passes=False, excludes=False,
layers=(5,6, 14))
@ -545,6 +590,9 @@ class LunaticsShot(object):
bb_render_layer.use_pass_combined = True
def cfg_bbmat(self, bb_mat_layer, thru=False):
"""
Configure the 'BB Mat' material key render layer.
"""
self._cfg_renderlayer(bb_mat_layer,
includes=False, passes=False, excludes=False,
layers=(0,1,2,3, 5,6,7, 10,11,12,13,14, 15,16))
@ -561,6 +609,9 @@ class LunaticsShot(object):
def cfg_sky(self, sky_render_layer):
"""
Configure the separate 'Sky' render layer.
"""
self._cfg_renderlayer(sky_render_layer,
includes=False, passes=False, excludes=False,
layers=(0,1,2,3,4, 5,6,7, 10,11,12,13,14))
@ -571,6 +622,9 @@ class LunaticsShot(object):
def cfg_ink(self, ink_layer, name="Ink", thickness=3, color=(0,0,0)):
"""
Configure a render layer for Freestyle ink ('Ink' or 'Ink-Thru').
"""
self._cfg_renderlayer(ink_layer,
includes=False, passes=False, excludes=False,
layers=(0,1,2,3, 5,6,7, 10,11,12,13, 15,16))
@ -600,7 +654,7 @@ class LunaticsShot(object):
def cfg_lineset(self, lineset, thickness=3, color=(0,0,0)):
"""
Configure the lineset.
Configure the Freestyle line set (i.e. which lines are drawn).
"""
#lineset.name = 'NormalInk'
# Selection options
@ -640,6 +694,9 @@ class LunaticsShot(object):
def cfg_linestyle(self, linestyle, thickness=INK_THICKNESS, color=INK_COLOR):
"""
Configure Freestyle line style (i.e. how the lines are drawn).
"""
# These are the only changeable parameters:
linestyle.color = color
linestyle.thickness = thickness

View File

@ -19,9 +19,30 @@ from .accumulate import UnionList, RecursiveDict
import yaml
def EnumFromList(schema, listname):
"""
Convert options from a list of strings referenced by key name.
Args:
schema (dict): definition of the property group containing the enum.
listname (str): key name of the list of option strings within the schema.
Returns:
List of options as tuples, as needed by Blender.
"""
return [(e, e.capitalize(), e.capitalize()) for e in schema[listname]]
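For example (schema data hypothetical; the one-line body is repeated so the snippet stands alone):

def EnumFromList(schema, listname):
    return [(e, e.capitalize(), e.capitalize()) for e in schema[listname]]

schema = {'roles': ['anim', 'cam', 'compos', 'mech']}
print(EnumFromList(schema, 'roles'))
# [('anim', 'Anim', 'Anim'), ('cam', 'Cam', 'Cam'),
#  ('compos', 'Compos', 'Compos'), ('mech', 'Mech', 'Mech')]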
def ExpandEnumList(schema, options):
"""
Convert options from a direct list.
Args:
schema (dict): definition of the property group containing the enum.
options (list): list of options. Individual options can be strings,
pairs, or triples.
Returns:
A list of triples defining the enumerated values as needed for Blender.
"""
blender_options = []
for option in options:
if type(option) is str:
@ -33,8 +54,13 @@ def ExpandEnumList(schema, options):
class PropertyGroupFactory(bpy.types.PropertyGroup):
"""
Metadata property group factory for attachment to Blender object types.
Definitions come from a YAML source (or default defined below).
Property group factory for attachment to Blender object types.
Structure of the property group returned is determined by a dictionary
schema, which may be loaded from a YAML file.
This is a "class factory", a class which returns another class when
called.
"""
# These values mirror the Blender documentation for the bpy.props types:
prop_types = {
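The 'class factory' remark deserves a tiny generic illustration. This is NOT ABX's actual implementation, just the bare Python pattern of a class whose instantiation builds and returns another class:

# Generic sketch of the pattern only (names hypothetical):
class ClassFactory:
    def __new__(cls, name, fields):
        # Instead of returning a ClassFactory instance, build and
        # return a brand-new class carrying the requested attributes.
        return type(name, (object,), dict(fields))

ShotProps = ClassFactory('ShotProps', {'code': 'A', 'rank': 3})
print(ShotProps)        # <class '__main__.ShotProps'>
print(ShotProps.code)   # 'A'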

View File

@ -1,6 +1,21 @@
# render_profile.py
"""
Blender Python code to set parameters based on render profiles.
The purpose of the "Render Profiles" feature is to simplify setting up
Blender to render animation according to a small number of standardized,
named profiles, instead of having to control each setting separately.
They're sort of like predefined radio buttons for your render settings.
I wrote this because I kept having to repeat the same steps to go from
quick "GL" or "Paint" renders at low frame rates to fully-configured
final renders, and I found the process was error-prone.
In particular, it was very easy to accidentally forget to change the render
filepath and have a previous render get overwritten! Or, alternatively, I
might forget to set things back up for a final render after I did a previz
animation.
"""
import bpy
@ -12,6 +27,100 @@ from . import file_context
class RenderProfile(object):
"""
A named set of render settings for Blender.
The profile is designed to be defined by a dictionary of fields, typically
loaded from a project YAML file (under the key 'render_profiles').
Attributes:
engine (str):
Mandatory choice of engine. Some aliases are supported, but the
standard values are: 'gl', meaning a setup for GL viewport
rendering, or one of 'bi'/'BLENDER_INTERNAL', 'cycles'/'CYCLES',
or 'bge' / 'BLENDER_GAME' for rendering with the respective
engines. There is no support for Eevee, because this is a 2.7-only
Add-on. It should be included in the port. No third-party engines
are currently supported.
fps (float):
Frames-per-second.
fps_skip (int):
Frames to skip between rendered frames (effectively divides the
frame rate).
fps_divisor (float):
This is the weird hack for specifying NTSC-compliant fps of 29.97
by using 1.001 as a divisor, instead of 1.0. Avoid if you can!
rendersize (int):
Percentage size of defined pixel dimensions to render. Note that
we don't support setting the pixel size directly. You should
configure that in Blender, but you can use this feature to make
a lower-resolution render.
compress (int):
Compression ratio for image formats that support it.
format (str):
Image or video output format.
One of: 'PNG', 'JPG', 'EXR', 'AVI' or 'MKV'.
Note that we don't support the full range of options, just some
common ones for previz and final rendering.
freestyle (bool):
Whether to turn on Freestyle ink rendering.
antialiasing_samples (str):
Controlled by 'antialias' key, which can be a number: 5, 8, 11, or 16.
Note that this attribute, which is used to directly set the value
in Blender is a string, not an integer.
use_antialiasing (bool):
Controlled by 'antialias' key. Whether to turn on antialiasing.
Any value other than 'False' or 'None' will turn it on.
False turns it off. None leaves it as-is.
motion_blur_samples (int):
Controlled by 'motionblur' key, which can be a number determining
the number of samples.
use_motion_blur (bool):
Controlled by 'motionblur' key. Any value other than False or None
will turn on motion blur. A value of True turns it on without
changing the samples. A value of False turns it off. None causes
it to be left as-is.
framedigits (int):
The number of '#' characters to use in the render filename to
indicate frame number. Only used if the format is an image stream.
suffix (str):
A string suffix placed after the base name, but before the frame
number to indicate what profile was used for the render. This
avoids accidentally overwriting renders made with other profiles.
Note that these attributes are not intended to be manipulated directly
by the user. The production designer is expected to define these
profiles in the <project>.yaml file under the 'render_profiles' key,
like this:
render_profiles:
previz:
engine: gl
suffix: MP
fps: 30
fps_skip: 6
motionblur: False
antialias: False
freestyle: False
rendersize: 50
and so on. This is then loaded by ABX into a list of RenderProfile
objects. Calling the RenderProfile.apply() method actually causes the
settings to be made.
"""
render_formats = {
# VERY simplified and limited list of formats from Blender that we need:
# <API 'format'>: (<bpy file format>, <filename extension>),
@ -104,6 +213,9 @@ class RenderProfile(object):
def apply(self, scene):
"""
Apply the profile settings to the given scene.
NOTE: in 0.2.6 this function isn't fully implemented, and the
render filepath will not include the proper unit name.
"""
if self.engine: scene.render.engine = self.engine
if self.fps: scene.render.fps = self.fps