Refactoring to separate file_context elements. Updates to ranks.py
This commit is contained in:
parent
6ff5b6b4f5
commit
43a7adb379
77
abx/enum.py
Normal file
77
abx/enum.py
Normal file
@ -0,0 +1,77 @@
|
||||
# enum.py
|
||||
"""
|
||||
A custom enumeration type that supports ordering and Blender enum UI requirements.
|
||||
"""
|
||||
|
||||
|
||||
class Enum(dict):
    """
    Two-way mapping between option names and their index numbers.

    Acts as a dict with both integer keys (mapping to Blender-style
    (identifier, name, description) triples) and string keys (mapping
    back to the index).  Each option name also becomes an attribute
    holding its index, except for names that would shadow the Enum API
    ('name', 'number', 'options').
    """

    def __init__(self, *options):
        """
        Args:
            *options: each option is either a plain string, or a
                sequence of up to three strings in the
                (identifier, name, description) form used by Blender's
                bpy.props.EnumProperty.
        """
        for i, option in enumerate(options):
            if isinstance(option, (list, tuple)):
                name = option[0]
                self[i] = tuple(option)
            else:
                name = str(option)
                self[i] = (option, option, option)
            self[name] = i
            # Don't let an option shadow the Enum API itself:
            if name not in ('name', 'number', 'options'):
                setattr(self, name, i)

    @property
    def options(self):
        """
        Gives the options in a Blender-friendly format.

        Returns:
            A list of triples containing the three required fields for
            Blender's bpy.props.EnumProperty.

            If the Enum was initialized with strings, the options will
            contain the same string three times. If initialized with
            tuples of strings, they will be used unaltered.
        """
        # (Fixed: removed an unused local accumulator left over from an
        # earlier loop-based implementation.)
        number_keys = sorted(k for k in self.keys() if type(k) is int)
        return [self[i] for i in number_keys]

    def name(self, n):
        """
        Return the name (str) value of enum, regardless of which is provided.

        Args:
            n (str, int): An enum value (either number or string).

        Returns:
            Returns a string if n is recognized. Returns None if n is
            neither an int nor a str (an unknown int key raises KeyError).
        """
        if type(n) is int:
            return self[n][0]
        elif type(n) is str:
            return n
        else:
            return None

    def number(self, n):
        """
        Return the number (int) value of enum, regardless of which is provided.

        Args:
            n (str, int): An enum value (either number or string).

        Returns:
            Returns a number if n is recognized. Returns None if n is
            neither an int nor a str (an unknown str key raises KeyError).
        """
        if type(n) is str:
            return self[n]
        elif type(n) is int:
            return n
        else:
            return None
|
||||
|
1157
abx/file_context.py
1157
abx/file_context.py
File diff suppressed because it is too large
Load Diff
337
abx/name_context.py
Normal file
337
abx/name_context.py
Normal file
@ -0,0 +1,337 @@
|
||||
# name_context.py
|
||||
"""
|
||||
NameContext defines the context of a particular named asset.
|
||||
|
||||
Examples include scenes in a Blender file, but could be applied to other things.
|
||||
Also a base class for FileContext which applies to the whole file.
|
||||
|
||||
NameContext handles the unified needs of these types, and in particular,
|
||||
hosts the methods used to generate names.
|
||||
"""
|
||||
|
||||
import os, re
|
||||
|
||||
import yaml
|
||||
|
||||
from .name_schema import FieldSchema
|
||||
|
||||
class NameContext(object):
    """
    Single naming context within the file (e.g. a Blender scene).

    NameContext defines the characteristics of any particular project
    unit (taxon) within the project. So, for example, it may represent
    the context of an "Episode" or a "Sequence" or a "Shot".

    Used in Blender, it will typically be used in two ways: one to represent
    the entire file (as the base class for FileContext) and one to represent
    a particular Blender scene within the file, which may represent a single
    shot, multiple shots with the same camera, or perhaps just an element
    of a compositing shot created from multiple Blender scenes.

    Examples of all three uses occur in "Lunatics!" episode 1: the
    "Press Conference" uses multicam workflow, with scenes for each camera
    with multiple shots selected from the timeline, using the VSE; most scenes
    are done 'single camera' on a shot-per-scene basis; but some shots use a
    'Freestyle camera clipping' technique which puts the line render in a
    separate (but linked) scene, while the final shot in the episode combines
    three different Blender scenes in a 2D composite, to effect a smooth
    transition to the titles.

    Attributes:
        container (NameContext):
            The project unit that contains this one. One step
            up the tree, but not necessarily the next step up
            in rank (because there can be skipped ranks).

        schemas (list(FieldSchema)):
            The schema list as seen by this unit, taking into
            account any schema overrides.

        namepath_segment (list):
            List of namepath codes defined in this object, not
            including the container's namepath. (Implementation)

        omit_ranks dict(str:int):
            How many ranks to omit from the beginning in shortened
            names for specific uses. (Implementation).
            Probably a mistake this isn't in the FieldSchema instead.

        fields (dict): The field values used to initialize the NameContext,
            May include some details not defined in this attribution
            API, and it includes the raw state of the 'name', 'code',
            and 'title' fields, determining which are
            authoritative -- i.e. fields which aren't specified are
            left to 'float', being generated by the related ones.
            Thus, name implies title, or title implies name. You
            can have one or the other or both, but the other will be
            generated from the provided one if it isn't specified.
            (Implementation).

        code (str|int|Enum):
            Identification code for the unit (I replaced 'id' in earlier
            versions, because 'id' is a reserved word in Python for Python
            memory reference / pointer identity.
            (R/W Property).

        namepath (list):
            List of codes for project units above this one.
            (R/O Property, generated from namepath_segment and
            container).

        rank (int): Rank of this unit (same as in Schema).
            (R/W Property, may affect other attributes).

        name (str): Short name for the unit.
            (R/W Property, may affect title).

        title (str): Full title for the unit.
            (R/W Property, may affect name).

        designation (str):
            Full designation for the unit, including all
            the namepath elements, but no title.

        fullname (str): The full designation, plus the current unit name.

        shortname (str): Abbreviated designation, according to omit_ranks,
            with name.
    """

    def __init__(self, container, fields=None, namepath_segment=(), ):
        self.clear()
        if container or fields or namepath_segment:
            self.update(container, fields, namepath_segment)

    def clear(self):
        """Reset to a blank, rankless, untitled state."""
        self.fields = {}
        self.schemas = ['project']
        self.rank = 0           # via the rank setter -> fields['rank']
        self.code = 'untitled'  # via the code setter -> fields['code'] (rank 0 is falsy)
        self.container = None
        self.namepath_segment = []

    def update(self, container=None, fields=None, namepath_segment=()):
        """
        (Re)initialize from a container unit and/or new field data.

        Args:
            container (NameContext): Containing unit, or None.
            fields (dict|str): Field data as a dict, or as YAML text.
            namepath_segment (sequence): Namepath codes contributed by
                this unit.
        """
        self.container = container

        if namepath_segment:
            # Coerce to list: 'namepath' concatenates this with the
            # container's namepath list, so storing the tuple that was
            # passed in would raise a TypeError there. (FIX)
            self.namepath_segment = list(namepath_segment)
        else:
            self.namepath_segment = []

        try:
            self.schemas = self.container.schemas
        except AttributeError:
            self.schemas = []

        try:
            # Copy, don't alias: the defaults written below must not
            # mutate the container's own omit_ranks dictionary. (FIX)
            self.omit_ranks = dict(self.container.omit_ranks)
        except AttributeError:
            self.omit_ranks = {}
        self.omit_ranks.update({
            'edit': 0,
            'render': 1,
            'filename': 1,
            'scene': 3})

        if fields:
            if isinstance(fields, dict):
                self.fields.update(fields)
            elif isinstance(fields, str):
                # Field data may arrive as YAML text (e.g. from metadata).
                self.fields.update(yaml.safe_load(fields))

    def update_fields(self, data):
        """Merge additional field data into this context."""
        self.fields.update(data)

    def _load_schemas(self, schemas, start=0):
        """
        Load schemas from a list of schema dictionaries.

        @schemas: list of dictionaries containing schema field data (see FieldSchema).
            The data will typically be extracted from YAML, and is
            expected to be a list of dictionaries, each of which defines
            fields understood by the FieldSchema class, to instantiate
            FieldSchema objects. The result is a linked chain of schemas from
            the top of the project tree down.

        @start: if a start value is given, the top of the existing schema
            chain is kept, and the provided schemas starts under the rank of
            the start level in the existing schema. This is what happens when
            the schema is locally overridden at some point in the hierarchy.
        """
        self.schemas = self.schemas[:start]
        if self.schemas:
            last = self.schemas[-1]
        else:
            last = None
        for schema in schemas:
            self.schemas.append(FieldSchema(last, schema['rank'], schema=schema))
            # NOTE(review): 'last' is deliberately not advanced here, so
            # every new schema chains from the same parent -- looks
            # suspicious (the commented line below would chain them);
            # confirm intended behavior before changing.
            #last = self.schemas[-1]

    def _parse_words(self, wordtext):
        """Split free text into a list of capitalized words."""
        words = []
        groups = re.split(r'[\W_]', wordtext)
        for group in groups:
            if len(group) > 1:
                # Capitalize the group, then split any internal CamelCase runs.
                group = group[0].upper() + group[1:]
                words.extend(re.findall(r'[A-Z][a-z]*', group))
            elif len(group) == 1:
                words.append(group[0].upper())
            else:
                continue
        return words

    def _cap_words(self, words):
        """'TitleCaps' the word list into one identifier."""
        return ''.join(w.capitalize() for w in words)

    def _underlower_words(self, words):
        """'lower_case_underscore' representation of the word list."""
        return '_'.join(w.lower() for w in words)

    def _undercap_words(self, words):
        """'Capitalized_Underscore' representation of the word list."""
        return '_'.join(w.capitalize() for w in words)

    def _spacecap_words(self, words):
        """'Capitalized Spaced' representation of the word list."""
        return ' '.join(w.capitalize() for w in words)

    def _compress_name(self, name):
        """Compress free text into a 'TitleCaps' identifier."""
        return self._cap_words(self._parse_words(name))

    @property
    def namepath(self):
        # Container's path plus this unit's own segment.
        if self.container:
            return self.container.namepath + self.namepath_segment
        else:
            return self.namepath_segment

    @property
    def rank(self):
        if 'rank' in self.fields:
            return self.fields['rank']
        else:
            return None

    @rank.setter
    def rank(self, rank):
        self.fields['rank'] = rank

    @property
    def name(self):
        if 'name' in self.fields:
            return self.fields['name']
        elif 'title' in self.fields:
            # Name floats: derive it from the title when not set explicitly.
            return self._compress_name(self.fields['title'])
        # elif 'code' in self.fields:
        #     return self.fields['code']
        else:
            return ''

    @name.setter
    def name(self, name):
        self.fields['name'] = name

    @property
    def code(self):
        # With a truthy rank, the code lives in the per-rank sub-dict.
        if self.rank:
            return self.fields[self.rank]['code']
        else:
            return self.fields['code']

    @code.setter
    def code(self, code):
        if self.rank:
            self.fields[self.rank] = {'code': code}
        else:
            self.fields['code'] = code

    @property
    def description(self):
        if 'description' in self.fields:
            return self.fields['description']
        else:
            return ''

    @description.setter
    def description(self, description):
        self.fields['description'] = str(description)

    def _get_name_components(self):
        # Alternating (formatted code, delimiter) pairs for each rank,
        # with the trailing delimiter dropped.
        components = []
        for code, schema in zip(self.namepath, self.schemas):
            if code is None: continue
            components.append(schema.format.format(code))
            components.append(schema.delimiter)
        return components[:-1]

    @property
    def fullname(self):
        if self.name:
            return (self.designation +
                    self.schemas[-1].delimiter +
                    self._compress_name(self.name))
        else:
            return self.designation

    @property
    def designation(self):
        return ''.join(self._get_name_components())

    @property
    def shortname(self):
        # x2 because name components alternate code/delimiter pairs.
        namebase = self.omit_ranks['filename'] * 2
        return (''.join(self._get_name_components()[namebase:]) +
                self.schemas[-1].delimiter +
                self._compress_name(self.name))

    def get_scene_name(self, suffix=''):
        """
        Create a name for the current scene, based on namepath.

        Arguments:
            suffix (str): Optional suffix code used to improve
                identifications of scenes.
        """
        # x2 because name components alternate code/delimiter pairs.
        namebase = self.omit_ranks['scene'] * 2
        desig = ''.join(self._get_name_components()[namebase:])

        if suffix:
            return desig + ' ' + suffix
        else:
            return desig

    def get_render_path(self, suffix='', framedigits=5, ext='png'):
        """
        Create a render filepath, based on namepath and parameters.

        Arguments:
            suffix (str):
                Optional unique code (usually for render profile).

            framedigits (int):
                How many digits to reserve for frame number.

            ext (str):
                Filetype extension for the render.

        This is meant to be called by render_profile to combine namepath
        based name with parameters for the specific render, to uniquely
        idenfify movie or image-stream output.
        """
        # NOTE(review): self.render_root is not defined in this class --
        # presumably provided by a subclass (e.g. FileContext); confirm.
        desig = ''.join(self._get_name_components()[self.omit_ranks['render'] + 1:])

        if ext in ('avi', 'mov', 'mp4', 'mkv'):
            # Movie containers: a single file, no frame-number field.
            if suffix:
                path = os.path.join(self.render_root, suffix,
                                    desig + '-' + suffix + '.' + ext)
            else:
                path = os.path.join(self.render_root, ext.upper(),
                                    desig + '.' + ext)
        else:
            # Image streams: per-frame files with '#' frame placeholders.
            if suffix:
                path = os.path.join(self.render_root,
                                    suffix, desig,
                                    desig + '-' + suffix + '-f' + '#' * framedigits + '.' + ext)
            else:
                path = os.path.join(self.render_root,
                                    ext.upper(), desig,
                                    desig + '-f' + '#' * framedigits + '.' + ext)
        return path
|
286
abx/name_schema.py
Normal file
286
abx/name_schema.py
Normal file
@ -0,0 +1,286 @@
|
||||
# name_schema.py
|
||||
"""
|
||||
Object for managing schema directives from project YAML and applying them to parsing and mapping name fields.
|
||||
"""
|
||||
|
||||
import string, collections
|
||||
|
||||
from .ranks import RankNotFound, Rank, Branch, Trunk
|
||||
|
||||
class FieldSchema(object):
    """
    Represents a schema used for parsing and constructing a field in names.

    We need naming information in various formats, based on knowledge about
    the role of the Blender file and scene in the project. This object tracks
    this information and returns correct names based on it via properties.

    Note that FieldSchema is NOT an individual project unit name, but a defined
    pattern for how names are treat at that level in the project. It is a class
    of names, not a name itself. Thus "shot" has a schema, but is distinct from
    "shot A" which is a particular "project unit". The job of the schema is
    to tell us things like "shots in this project will be represented by
    single capital letters".

    See NameContext for the characteristics of a particular unit.

    Attributes:
        codetype (type): Type of code name used for this rank.
            Usually it will be int, str, or Enum.
            Pre-defined enumerations are available
            for uppercase letters (_letter) and
            lowercase letters (_lowercase) (Roman --
            in principle, other alphabets could be added).

        rank (Rank): Rank of hierarchy under project (which is 0). The
            rank value increases as you go "down" the tree.
            Sorry about that confusion.

        ranks (Branch): List of named ranks known to schema (may include
            both higher and lower ranks).

        parent (FieldSchema|None):
            Earlier rank to which this schema is attached.

        format (str): Code for formatting with Python str.format() method.
            Optional: format can also be specified with the
            following settings, or left to default formatting.

        pad (str): Padding character.
        minlength (int): Minimum character length (0 means it may be empty).
        maxlength (int): Maximum character length (0 means no limit).

        words (bool): Treat name/title fields like a collection of words,
            which can then be represented using "TitleCaps" or
            "underscore_spacing", etc for identifier use.

        delimiter (str): Field delimiter marking the end of this ranks'
            code in designations. Note this is the delimiter
            after this rank - the higher (lower value) rank
            controls the delimiter used before it.

        default: The default value for this rank. May be None,
            in which case, the rank will be treated as unset
            until a setting is made. The UI must provide a
            means to restore the unset value. Having no values
            set below a certain rank is how a NameContext's
            rank is determined.

    Note that the rank may go back to a lower value than the schema's
    parent object in order to overwrite earlier schemas (for overriding a
    particular branch in the project) or (compare this to the use of '..'
    in operating system paths). Or it may skip a rank, indicating an
    implied intermediate value, which will be treated as having a fixed
    value. (I'm not certain I want that, but it would allow us to keep
    rank numbers synchronized better in parallel hierarchies in a project).

    Note that schemas can be overridden at any level in a project by
    'project_schema' directives in unit YAML files, so it is possible to
    change the schema behavior locally. By design, only lower levels in
    the hierarchy (higher values of rank) can be affected by overrides.

    This kind of use isn't fully developed yet, but the plan is to include
    things like managing 'Library' assets with a very different structure
    from shot files. This way, the project can split into 'Library' and
    'Episode' forks with completely different schemas for each.
    """
    # Defaults
    _default_schema = {
        'delimiter': '-',

        'type': 'string',
        'format': '{:s}',
        'minlength': 1,         # Must be at least one character
        'maxlength': 0,         # 0 means unlimited
        'words': False,         # If true, treat value as words and spaces
        'pad': '0',             # Left-padding character for fixed length
        'default': None,

        'rank': 'project',
        'irank': 0,
        'ranks': ('series', 'episode', 'sequence',
                  'block', 'camera', 'shot', 'element')
    }

    # Really this is more like a set than a dictionary right now, but I
    # thought I might refactor to move the definitions into the dictionary:
    _codetypes = {
        'number': {},
        'string': {},
        'letter': {},
        'lowercase': {},
    }

    _letter = tuple((A, A, A) for A in string.ascii_uppercase)
    _lowercase = tuple((a, a, a) for a in string.ascii_lowercase)

    rank = 'project'
    irank = 0
    default = None

    #ranks = ('project',)
    branch = Trunk

    def __init__(self, parent=None, rank=None, schema=None, debug=False):
        """
        Create a FieldSchema from schema data source.

        FieldSchema is typically initialized based on data from YAML files
        within the project. This allows us to avoid encoding project structure
        into ABX, leaving how units are named up to the production designer.

        If you want our suggestions, you can look at the "Lunatics!" project's
        'lunatics.yaml' file, or the 'myproject.yaml' file in the ABX source
        distribution.

        Arguments:
            parent (FieldSchema):
                The level in the schema hierarchy above this one.
                Should be None if this is the top.

            rank (int): The rank of this schema to be created.

            schema (dict): Data defining the schema, typically loaded from a
                YAML file in the project.

            debug (bool): Used only for testing. Turns on some verbose output
                about internal implementation.

        Note that the 'rank' is specified because it may NOT be sequential from
        the parent schema.
        """
        # Local import: abstract base classes for the enumerated-type
        # checks below. ('collections.Sequence' etc. were removed from
        # the top-level 'collections' namespace in Python 3.10. FIX)
        import collections.abc

        # Three types of schema data:

        # Make sure schema is a copy -- no side effects!
        if not schema:
            schema = {}
        else:
            s = {}
            s.update(schema)
            schema = s

        if not rank and 'rank' in schema:
            rank = schema['rank']

        # Stepped down in rank from parent:
        self.parent = parent

        if parent and rank:
            # Check rank is defined in parent ranks and use that
            # We can skip optional ranks
            if rank in parent.ranks:
                j = parent.ranks.index(rank)
                self.ranks = parent.ranks[j + 1:]
                self.rank = rank
            else:
                # It's an error to ask for a rank that isn't defined
                raise RankNotFound(
                    '"%s" not in defined ranks for "%s"' % (rank, parent))

        elif parent and not rank:
            # By default, get the first rank below parent
            self.rank = parent.ranks[0]
            self.ranks = parent.ranks[1:]

        elif rank and not parent:
            # With no parent, we're starting a new tree and renaming the root
            self.rank = rank
            self.ranks = self._default_schema['ranks']

        else:  # not rank and not parent:
            # New tree with default rank
            self.rank = self._default_schema['rank']
            self.ranks = self._default_schema['ranks']

        # Directly inherited/acquired from parent
        # So far, only need a delimiter specified, but might be other stuff
        self.delimiter = self._default_schema['delimiter']
        if parent and parent.delimiter: self.delimiter = parent.delimiter

        # Explicit override by the new schema:
        if 'ranks' in schema: self.ranks = schema['ranks']
        if 'delimiter' in schema: self.delimiter = schema['delimiter']
        if 'default' in schema:
            # YAML string 'None' means "no default" (unset).
            if schema['default'] == 'None':
                self.default = None
            else:
                self.default = schema['default']

        # Default unless specified (i.e. not inherited from parent)
        newschema = {}
        newschema.update(self._default_schema)
        newschema.update(schema)

        self.format = str(newschema['format'])

        self.minlength = int(newschema['minlength'])
        self.maxlength = int(newschema['maxlength'])
        self.pad = str(newschema['pad'])
        self.words = bool(newschema['words'])

        if newschema['type'] == 'letter':
            self.codetype = self._letter

        elif newschema['type'] == 'lowercase':
            self.codetype = self._lowercase

        elif newschema['type'] == 'number':
            # Recognized Python types
            self.codetype = int
            # NOTE: the original condition here ("if 'minlength' or
            # 'maxlength' in schema:") was always true, so the format is
            # always derived from minlength; behavior preserved, dead
            # condition removed. (FIX)
            self.format = '{:0>%dd}' % self.minlength

        elif newschema['type'] == 'string':
            self.codetype = str

            if ('minlength' in schema) or ('maxlength' in schema):
                if self.maxlength == 0:
                    # Special case for unlimited length: pad to minlength
                    # only. (FIX: was unconditionally overwritten below,
                    # truncating unlimited fields to zero characters.)
                    self.format = '{:%1.1s>%ds}' % (self.pad, self.minlength)
                else:
                    self.format = '{:%1.1s>%d.%ds}' % (
                        self.pad, self.minlength, self.maxlength)

        elif newschema['type'] == 'bool':
            self.codetype = bool

        elif isinstance(newschema['type'], collections.abc.Sequence):
            # Enumerated types
            # This is somewhat specific to Blender -- setting the
            # enumeration values requires a sequence in a particular format
            self.codetype = []
            for option in newschema['type']:
                # str is itself a Sequence, so exclude it explicitly.
                if type(option) is not str and isinstance(option, collections.abc.Sequence):
                    option = tuple([str(e) for e in option][:3])
                else:
                    option = (str(option), str(option), str(option))
                self.codetype.append(option)

        elif isinstance(newschema['type'], collections.abc.Mapping):
            # Mapping form: key -> (name, description) details.
            self.codetype = []
            for key, val in newschema['type'].items():
                if type(val) is not str and isinstance(val, collections.abc.Sequence):
                    if len(val) == 0:
                        option = (str(key), str(key), str(key))
                    elif len(val) == 1:
                        option = (str(key), str(val[0]), str(val[0]))
                    else:
                        option = (str(key), str(val[0]), str(val[1]))
                else:
                    option = (str(key), str(val), str(val))
                self.codetype.append(option)
        else:
            # If all else fails
            self.codetype = None

    def __repr__(self):
        # Diagnostic representation (parent, rank, delimiter, default,
        # format, codetype).
        return ('<(%s).FieldSchema: %s (%s, %s, %s, (%s))>' % (
            repr(self.parent),
            #self.irank,
            self.rank,
            self.delimiter,
            self.default,
            self.format,
            self.codetype
        ))
|
||||
|
16
abx/parsers/__init__.py
Normal file
16
abx/parsers/__init__.py
Normal file
@ -0,0 +1,16 @@
|
||||
# parsers (sub-package)
|
||||
"""
|
||||
Filename Parsers & Registry for FileContext.
|
||||
"""
|
||||
|
||||
# Registry of parser classes, keyed by each parser's declared ``name``.
NameParsers = {}


def registered_parser(parser):
    """
    Class decorator: record *parser* in the NameParsers registry under
    its ``name`` attribute and return it unchanged.
    """
    NameParsers.update({parser.name: parser})
    return parser
|
||||
|
||||
from . import abx_episode, abx_fallback, abx_schema
|
||||
|
BIN
abx/parsers/__pycache__/__init__.cpython-35.pyc
Normal file
BIN
abx/parsers/__pycache__/__init__.cpython-35.pyc
Normal file
Binary file not shown.
BIN
abx/parsers/__pycache__/abx_episode.cpython-35.pyc
Normal file
BIN
abx/parsers/__pycache__/abx_episode.cpython-35.pyc
Normal file
Binary file not shown.
BIN
abx/parsers/__pycache__/abx_fallback.cpython-35.pyc
Normal file
BIN
abx/parsers/__pycache__/abx_fallback.cpython-35.pyc
Normal file
Binary file not shown.
BIN
abx/parsers/__pycache__/abx_schema.cpython-35.pyc
Normal file
BIN
abx/parsers/__pycache__/abx_schema.cpython-35.pyc
Normal file
Binary file not shown.
204
abx/parsers/abx_episode.py
Normal file
204
abx/parsers/abx_episode.py
Normal file
@ -0,0 +1,204 @@
|
||||
# abx_episode.py
|
||||
"""
|
||||
Custom parser written for "Lunatics!" Project Episode files.
|
||||
|
||||
Superseded by 'abx_schema' parser (probably).
|
||||
"""
|
||||
|
||||
import re, copy
|
||||
|
||||
from . import registered_parser
|
||||
|
||||
wordre = re.compile(r'([A-Z][a-z]+|[a-z]+|[0-9]+|[A-Z][A-Z]+)')
|
||||
|
||||
@registered_parser
|
||||
class Parser_ABX_Episode:
|
||||
"""
|
||||
Original "Lunatics!" filename parsing algorithm. (DEPRECATED)
|
||||
|
||||
This parser was written before the Schema parser. It hard-codes the schema used
|
||||
in the "Lunatics!" Project, and can probably be safely replaced by using the Schema
|
||||
parser with appropriate YAML settings in the <project>.yaml file, which also allows
|
||||
much more flexibility in naming schemes.
|
||||
|
||||
YAML parameter settings available for this parser:
|
||||
|
||||
---
|
||||
definitions:
|
||||
parser: abx_episode # Force use of this parser
|
||||
|
||||
parser_options: # Available settings (w/ defaults)
|
||||
field_separator: '-'
|
||||
episode_separator: 'E'
|
||||
filetype_separator: '.'
|
||||
|
||||
Filetypes and roles are hard-code, and can't be changed from the YAML.
|
||||
|
||||
Assumes field-based filenames of the form:
|
||||
|
||||
<series>E<episode>[-<seq>[-<block>[-Cam<camera>][-<shot>]]][-<title>]-<role>.<filetype>
|
||||
|
||||
Where the <field> indicates fields with fieldnames, and there are three expected separators:
|
||||
|
||||
- is the 'field_separator'
|
||||
E is the 'episode_separator'
|
||||
. is the 'filetype_separator'
|
||||
|
||||
(These can be overridden in the initialization).
|
||||
The class is callable, taking a string as input and returning a dictionary of fields.
|
||||
"""
|
||||
name = 'abx_episode'
|
||||
|
||||
max_score = 10 # Maximum number of fields parsed
|
||||
|
||||
# supported values for filetype
|
||||
filetypes = {
|
||||
'blend': "Blender File",
|
||||
'kdenlive': "Kdenlive Video Editor File",
|
||||
'mlt': "Kdenlive Video Mix Script",
|
||||
'svg': "Scalable Vector Graphics (Inkscape)",
|
||||
'kra': "Krita Graphic File",
|
||||
'xcf': "Gimp Graphic File",
|
||||
'png': "Portable Network Graphics (PNG) Image",
|
||||
'jpg': "Joint Photographic Experts Group (JPEG) Image",
|
||||
'aup': "Audacity Project",
|
||||
'ardour': "Ardour Project",
|
||||
'flac': "Free Lossless Audio Codec (FLAC)",
|
||||
'mp3': "MPEG Audio Layer III (MP3) Audio File",
|
||||
'ogg': "Ogg Vorbis Audio File",
|
||||
'avi': "Audio Video Interleave (AVI) Video Container",
|
||||
'mkv': "Matroska Video Container",
|
||||
'mp4': "Moving Picture Experts Group (MPEG) 4 Format}",
|
||||
'txt': "Plain Text File"
|
||||
}
|
||||
|
||||
# Roles that make sense in an episode context
|
||||
roles = {
|
||||
'extras': "Extras, crowds, auxillary animated movement",
|
||||
'mech': "Mechanical animation",
|
||||
'anim': "Character animation",
|
||||
'cam': "Camera direction",
|
||||
'vfx': "Visual special effects",
|
||||
'compos': "Compositing",
|
||||
'bkg': "Background 2D image",
|
||||
'bb': "Billboard 2D image",
|
||||
'tex': "Texture 2D image",
|
||||
'foley': "Foley sound",
|
||||
'voice': "Voice recording",
|
||||
'fx': "Sound effects",
|
||||
'music': "Music track",
|
||||
'cue': "Musical cue",
|
||||
'amb': "Ambient sound",
|
||||
'loop': "Ambient sound loop",
|
||||
'edit': "Video edit"
|
||||
}
|
||||
|
||||
# A few filetypes imply their roles:
|
||||
roles_by_filetype = {
|
||||
'kdenlive': 'edit',
|
||||
'mlt': 'edit'
|
||||
}
|
||||
|
||||
|
||||
def __init__(self, field_separator='-', episode_separator='E', filetype_separator='.',
|
||||
fields=None, filetypes=None, roles=None, **kwargs):
|
||||
if not fields:
|
||||
fields = {}
|
||||
if filetypes:
|
||||
self.filetypes = copy.deepcopy(self.filetypes) # Copy class attribute to instance
|
||||
self.filetypes.update(filetypes) # Update with new values
|
||||
if roles:
|
||||
self.roles = copy.deepcopy(self.roles) # Copy class attribute to instance
|
||||
self.roles.update(roles) # Update with new values
|
||||
self.field_separator = field_separator
|
||||
self.episode_separator = episode_separator
|
||||
self.filetype_separator = filetype_separator
|
||||
|
||||
def __call__(self, filename, namepath):
|
||||
score = 0.0
|
||||
fielddata = {}
|
||||
|
||||
# Check for filetype ending
|
||||
i_filetype = filename.rfind(self.filetype_separator)
|
||||
if i_filetype < 0:
|
||||
fielddata['filetype'] = None
|
||||
else:
|
||||
fielddata['filetype'] = filename[i_filetype+1:]
|
||||
filename = filename[:i_filetype]
|
||||
score = score + 1.0
|
||||
|
||||
components = filename.split(self.field_separator)
|
||||
|
||||
# Check for role marker in last component
|
||||
if components[-1] in self.roles:
|
||||
fielddata['role'] = components[-1]
|
||||
del components[-1]
|
||||
fielddata['hierarchy'] = 'episode'
|
||||
score = score + 2.0
|
||||
elif fielddata['filetype'] in self.roles_by_filetype:
|
||||
fielddata['role'] = self.roles_by_filetype[fielddata['filetype']]
|
||||
fielddata['hierarchy'] = 'episode'
|
||||
else:
|
||||
fielddata['role'] = None
|
||||
fielddata['hierarchy'] = None
|
||||
|
||||
# Check for a descriptive title (must be 3+ characters in length)
|
||||
if components and len(components[-1])>2:
|
||||
# Normalize the title as words with spaces
|
||||
title = ' '.join(w for w in wordre.split(components[-1]) if wordre.fullmatch(w))
|
||||
del components[-1]
|
||||
score = score + 1.0
|
||||
else:
|
||||
title = None
|
||||
|
||||
# Check if first field contains series/episode number
|
||||
if components:
|
||||
prefix = components[0]
|
||||
try:
|
||||
fielddata['series'] = {}
|
||||
fielddata['episode'] = {}
|
||||
fielddata['series']['code'], episode_id = prefix.split(self.episode_separator)
|
||||
fielddata['episode']['code'] = int(episode_id)
|
||||
fielddata['rank'] = 'episode'
|
||||
del components[0]
|
||||
score = score + 2.0
|
||||
except:
|
||||
pass
|
||||
|
||||
# Check for sequence/block/shot/camera designations
|
||||
if components:
|
||||
fielddata['seq'] = {}
|
||||
fielddata['seq']['code'] = components[0]
|
||||
fielddata['rank'] = 'seq'
|
||||
del components[0]
|
||||
score = score + 1.0
|
||||
|
||||
if components:
|
||||
try:
|
||||
fielddata['block'] = {}
|
||||
fielddata['block']['code'] = int(components[0])
|
||||
del components[0]
|
||||
fielddata['rank'] = 'block'
|
||||
score = score + 1.0
|
||||
except:
|
||||
pass
|
||||
|
||||
if components and components[0].startswith('Cam'):
|
||||
fielddata['camera'] = {}
|
||||
fielddata['camera']['code'] = components[0][len('Cam'):]
|
||||
fielddata['rank'] = 'camera'
|
||||
del components[0]
|
||||
score = score + 1.0
|
||||
|
||||
if components:
|
||||
# Any remaining structure is joined back to make the shot ID
|
||||
fielddata['shot'] = {}
|
||||
fielddata['shot']['code'] = ''.join(components)
|
||||
fielddata['rank'] = 'shot'
|
||||
components = None
|
||||
score = score + 1.0
|
||||
|
||||
if title and fielddata['rank'] in fielddata:
|
||||
fielddata[fielddata['rank']]['title'] = title
|
||||
|
||||
return score/self.max_score, fielddata
|
105
abx/parsers/abx_fallback.py
Normal file
105
abx/parsers/abx_fallback.py
Normal file
@ -0,0 +1,105 @@
|
||||
# abx_fallback.py
|
||||
"""
|
||||
Fallback parser used in case others fail.
|
||||
|
||||
The fallback parser makes only a very minimal and robust set of assumptions.
|
||||
|
||||
Any legal filename will successfully return a simple parse, though much
|
||||
interpretation may be lost. It still allows for common field-based practices,
|
||||
but falls back on using the unaltered filename if necessary.
|
||||
"""
|
||||
|
||||
import re, os
|
||||
|
||||
import yaml
|
||||
|
||||
from . import registered_parser
|
||||
|
||||
|
||||
# Project-wide default definitions (filetypes, roles, roles_by_filetype),
# loaded once at import time from the abx.yaml bundled with the package.
# NOTE: this performs file I/O on import and will raise if abx.yaml is
# missing from the package directory.
DEFAULT_YAML = {}
with open(os.path.join(os.path.dirname(__file__), '..', 'abx.yaml')) as def_yaml_file:
    DEFAULT_YAML.update(yaml.safe_load(def_yaml_file))
||||
|
||||
|
||||
|
||||
@registered_parser
class Parser_ABX_Fallback(object):
    """
    Highly-tolerant parser to fall back on if others fail.

    Makes very minimal assumptions about filename structure.

    YAML options available:

    ---
    definitions:
        parser: abx_fallback  # Force use of this parser.

    There are no other options. Field separators are defined very broadly,
    and include most non-word characters (~#$!=+&_-). This was mostly designed
    to work without a project schema available.
    """
    name = 'abx_fallback'

    # Vocabulary tables come from the packaged abx.yaml defaults:
    filetypes = DEFAULT_YAML['definitions']['filetypes']
    roles = DEFAULT_YAML['definitions']['roles']
    roles_by_filetype = (
        DEFAULT_YAML['definitions']['roles_by_filetype'])

    main_sep_re = re.compile(r'\W+')  # One or more non-word characters
    comment_sep_re = re.compile(r'[\W_][\W_]+|[~#$!=+&]+')  # "comment" delimiters

    def __init__(self, **kwargs):
        # Deliberately takes no options; extra kwargs are ignored.
        pass

    def _parse_ending(self, filename, separator):
        """
        Split the last separator-delimited field off the end of filename.

        Returns:
            (suffix, remainder, score): suffix is the text after the last
            separator (None if the separator never occurs); remainder is
            the rest of the name; score is 1.0 on a split, else 0.0.
        """
        try:
            remainder, suffix = filename.rsplit(separator, 1)
            score = 1.0
        except ValueError:
            # Separator not present in the filename at all.
            remainder = filename
            suffix = None
            score = 0.0
        return (suffix, remainder, score)

    def __call__(self, filename, namepath):
        """
        Parse filename, returning (normalized_score, fields).

        Recognizes an optional filetype extension, a trailing "comment"
        field, an explicit or filetype-implied role, and treats whatever
        remains as the title (also squashed into a CamelCase 'code').
        """
        fields = {}
        score = 1.0
        possible = 4.5  # NOTE(review): nominal maximum; basis unclear -- confirm

        # Filetype extension (only counts if it's a recognized filetype)
        split = filename.rsplit('.', 1)
        if len(split)<2 or split[1] not in self.filetypes:
            fields['filetype'] = None
            remainder = filename
            # NOTE(review): score is increased when NO known filetype is
            # found, which looks inverted -- confirm intent before changing.
            score += 1.0
        else:
            fields['filetype'] = split[1]
            remainder = split[0]

        # Trailing "comment" field, if a comment delimiter appears
        comment_match = self.comment_sep_re.search(remainder)
        if comment_match:
            fields['comment'] = remainder[comment_match.end():]
            remainder = remainder[:comment_match.start()]
        else:
            fields['comment'] = None

        # Explicit role marker: last word of the remainder, if recognized
        role = self.main_sep_re.split(remainder)[-1]
        if role in self.roles:
            fields['role'] = role
            # NOTE(review): assumes the separator before the role matched
            # exactly one character; main_sep_re is \W+ so it may have
            # matched more -- confirm.
            remainder = remainder[:-1-len(role)]
            score += 1.0
        else:
            fields['role'] = None

        # Implied role (overrides the explicit role when filetype maps to one)
        if fields['filetype'] in self.roles_by_filetype:
            fields['role'] = self.roles_by_filetype[fields['filetype']]
            score += 1.0

        # Whatever remains becomes the title; 'code' is its CamelCase squash
        words = self.main_sep_re.split(remainder)
        fields['code'] = ''.join([w.capitalize() for w in words])
        fields['title'] = remainder

        return score/possible, fields
|
||||
|
189
abx/parsers/abx_schema.py
Normal file
189
abx/parsers/abx_schema.py
Normal file
@ -0,0 +1,189 @@
|
||||
# abx_schema.py
|
||||
"""
|
||||
Generalized fields-based parser based on provided schema.
|
||||
|
||||
Expands on the 'abx_episode' parser by allowing all the schema to
|
||||
be defined by outside configuration data (generally provided in a
|
||||
project YAML file, but this module does not depend on the data
|
||||
source used).
|
||||
"""
|
||||
|
||||
from . import registered_parser
|
||||
|
||||
@registered_parser
class Parser_ABX_Schema(object):
    """
    Parser based on using the list of schemas.
    The schemas are normally defined in the project root directory YAML.

    The project YAML can additionally control parsing with this parser:

    ---
    definitions:
        parser: abx_schema                # Force use of this parser

        parser_options:                   # Set parameters
            filetype_separator:    '.'
            comment_separator:     '--'
            role_separator:        '-'
            title_separator:       '-'

        filetypes:                        # Recognized filetypes.
            blend:    Blender File        # <filetype>: documentation
            ...

        roles:                            # Recognized role fields.
            anim:    Character Animation  # <role>: documentation
            ...

        roles_by_filetype:                # Roles implied by filetype.
            kdenlive: edit                # <filetype>:<role>
            ...

    (For the full default lists see abx/abx.yaml).

    schemas (list): The current schema-list defining how filenames should be parsed.
                    This "Schema" parser uses this to determine both parsing and
                    mapping of text fields in the filename.

    definitions(dict): The project definitions currently visible to the parser.
    """
    name = 'abx_schema'

    def __init__(self, schemas=None, definitions=None,
                    filetype_separator = '.',
                    comment_separator = '--',
                    role_separator = '-',
                    title_separator = '-',
                    **kwargs):
        """
        Args:
            schemas (list): Chain of schema objects (rank, delimiter, format)
                used to parse rank fields from the front of the filename.
            definitions (dict): Project definitions; 'roles', 'filetypes' and
                'roles_by_filetype' are consulted if present.
            filetype_separator (str): Delimiter before the file extension.
            comment_separator (str): Delimiter before a trailing comment.
            role_separator (str): Delimiter before a role field.
            title_separator (str): Delimiter before the title field.
            **kwargs: Ignored (tolerates extra parser options).
        """
        self.filetype_separator = filetype_separator
        self.comment_separator = comment_separator
        self.role_separator = role_separator
        self.title_separator = title_separator

        self.schemas = schemas

        # FIX: tolerate the default definitions=None -- the original applied
        # 'in' to None, raising TypeError whenever no definitions were given.
        if definitions is None:
            definitions = {}

        self.roles = definitions.get('roles', [])
        self.filetypes = definitions.get('filetypes', [])
        self.roles_by_filetype = definitions.get('roles_by_filetype', [])

    def _parse_ending(self, filename, separator):
        """
        Split the last separator-delimited field off the end of filename.

        Returns:
            (suffix, remainder, score): suffix is None and score 0.0 when
            the separator does not occur; otherwise score is 1.0.
        """
        try:
            remainder, suffix = filename.rsplit(separator, 1)
            score = 1.0
        except ValueError:
            remainder = filename
            suffix = None
            score = 0.0
        return (suffix, remainder, score)

    def _parse_beginning(self, filename, separator):
        """
        Split the first separator-delimited field off the front of filename.

        Returns:
            (prefix, remainder, score): remainder is '' and score 0.0 when
            the separator does not occur; otherwise score is 1.0.
        """
        try:
            prefix, remainder = filename.split(separator, 1)
            score = 1.0
        except ValueError:
            prefix = filename
            remainder = ''
            score = 0.0
        return (prefix, remainder, score)

    def __call__ (self, filename, namepath, debug=False):
        """
        Parse filename against the schema chain and namepath context.

        Returns:
            (float, dict): Normalized confidence score (score/possible) and
            the parsed fields ('filetype', 'comment', 'role', 'title',
            'rank', and per-rank {'code', 'title'} sub-dicts).
        """
        fields = {}
        score = 0.0
        possible = 0.0

        # First get specially-handled extensions
        remainder = filename
        field, newremainder, s = self._parse_ending(remainder, self.filetype_separator)
        if field and field in self.filetypes:
            remainder = newremainder
            fields['filetype'] = field
            score += s*1.0
        else:
            fields['filetype'] = None

        field, remainder, s = self._parse_ending(remainder, self.comment_separator)
        fields['comment'] = field
        score += s*0.5

        field, newremainder, s = self._parse_ending(remainder, self.role_separator)
        if field and field in self.roles:
            remainder = newremainder
            fields['role'] = field
            score += s*0.5
        else:
            fields['role'] = None

        field, remainder, s = self._parse_ending(remainder, self.title_separator)
        fields['title'] = field
        score += s*0.5

        possible += 3.0

        # Implicit roles
        # FIX: the original tested fields['role'] (always None on this path)
        # against roles_by_filetype -- a dead branch -- and assigned the
        # result to self.role, leaking state onto the parser instance.
        # Key on the filetype and record the role in the fields dict.
        if ( not fields['role'] and
             fields['filetype'] and
             fields['filetype'] in self.roles_by_filetype):
            fields['role'] = self.roles_by_filetype[fields['filetype']]
            score += 0.2

        #possible += 0.2

        # Figure the rest out from the schema
        # Find the matching rank start position for the filename
        start = 0
        for start, (schema, name) in enumerate(zip(self.schemas, namepath)):
            field, r, s = self._parse_beginning(remainder, schema.delimiter)
            try:
                if field.lower() == schema.format.format(name).lower():
                    score += 1.0
                    break
            except ValueError:
                # Field couldn't be formatted for comparison; keep scanning.
                print(' (365) field, format', field, schema.format)

        possible += 1.0

        # Starting from that position, try to match fields
        # up to the end of the namepath (checking against it)
        irank = 0
        for irank, (schema, name) in enumerate(
                    zip(self.schemas[start:], namepath[start:])):
            if not remainder: break
            field, remainder, s = self._parse_beginning(remainder, schema.delimiter)
            score += s
            try:
                if ( type(field) == str and
                     field.lower() == schema.format.format(name).lower()):
                    fields[schema.rank]={'code':field}
                    fields['rank'] = schema.rank
                    score += 1.0
            except ValueError:
                print(' (384) field, format', field, schema.format)
            possible += 2.0

        # Remaining fields are authoritative (doesn't affect score)
        for schema in self.schemas[irank:]:
            if not remainder: break
            field, remainder, s = self._parse_beginning(remainder, schema.delimiter)
            fields[schema.rank]={'code':field}
            fields['rank'] = schema.rank

        # Attach the parsed title to the deepest rank that was identified
        # (each rank sub-dict is created whenever 'rank' is set).
        if 'rank' in fields:
            fields[fields['rank']]['title'] = fields['title']

        # Redundant safety net for the implied role (kept from original).
        if not fields['role'] and fields['filetype'] in self.roles_by_filetype:
            fields['role'] = self.roles_by_filetype[fields['filetype']]

        return score/possible, fields
|
20
abx/ranks.py
20
abx/ranks.py
@ -7,7 +7,11 @@ possibility of branching at nodes with redefined ranks via
|
||||
the 'project_schema' directives in project YAML files.
|
||||
"""
|
||||
|
||||
import numbers
|
||||
class RankNotFound(LookupError):
|
||||
"""
|
||||
Error returned if an unexpected 'rank' is encountered.
|
||||
"""
|
||||
pass
|
||||
|
||||
class Branch(object):
|
||||
"""
|
||||
@ -37,7 +41,7 @@ class Branch(object):
|
||||
if self.code:
|
||||
code = self.code
|
||||
else:
|
||||
code = 'trunk'
|
||||
code = 'Trunk'
|
||||
return "<branch '%s': %s>" % (code, ranklist)
|
||||
|
||||
def __contains__(self, other):
|
||||
@ -53,9 +57,13 @@ class Branch(object):
|
||||
if isinstance(n, int) and 0 < n < len(self._ranks):
|
||||
return self._ranks[n]
|
||||
elif isinstance(n, str):
|
||||
if n.lower()=='trunk':
|
||||
return self._ranks[0]
|
||||
for rank in self._ranks:
|
||||
if str(rank) == n:
|
||||
return rank
|
||||
elif n==0:
|
||||
self._ranks[0]
|
||||
else:
|
||||
raise TypeError
|
||||
|
||||
@ -168,7 +176,7 @@ class Rank(object):
|
||||
if (self.num + other) < len(self.branch.ranks):
|
||||
return self.branch.ranks[self.num+other]
|
||||
elif (self.num + other) < 1:
|
||||
return trunk
|
||||
return Trunk
|
||||
else:
|
||||
return None
|
||||
else:
|
||||
@ -184,7 +192,7 @@ class Rank(object):
|
||||
if 0 < (self.num - other) < len(self.branch.ranks):
|
||||
return self.branch.ranks[self.num-other]
|
||||
elif (self.num - other) < 1:
|
||||
return trunk
|
||||
return Trunk
|
||||
elif (self.num - other) > len(self.branch.ranks):
|
||||
return None
|
||||
else:
|
||||
@ -241,7 +249,7 @@ class RankList(list):
|
||||
return super().__getitem__(i)
|
||||
|
||||
|
||||
# Define the trunk branch object
|
||||
# Define the Trunk branch object
|
||||
# This schema will make sense for any unaffiliated Blender document,
|
||||
# even if it hasn't been saved as a file yet:
|
||||
trunk = Branch(None, '', 0, ('', 'file', 'scene'))
|
||||
Trunk = Branch(None, '', 0, ('', 'file', 'scene'))
|
||||
|
@ -521,7 +521,7 @@ class Parser_ABX_Fallback(object):
|
||||
class RankNotFound(LookupError):
|
||||
pass
|
||||
|
||||
class NameSchema(object):
|
||||
class FieldSchema(object):
|
||||
"""
|
||||
Represents a schema used for parsing and constructing designations, names, etc.
|
||||
"""
|
||||
@ -686,7 +686,7 @@ class NameSchema(object):
|
||||
|
||||
|
||||
def __repr__(self):
|
||||
return('<(%s).NameSchema: %s (%s, %s, %s, (%s))>' % (
|
||||
return('<(%s).FieldSchema: %s (%s, %s, %s, (%s))>' % (
|
||||
repr(self.parent),
|
||||
#self.irank,
|
||||
self.rank,
|
||||
@ -751,11 +751,11 @@ class NameContext(object):
|
||||
"""
|
||||
Load schemas from a list of schema dictionaries.
|
||||
|
||||
@schemas: list of dictionaries containing schema field data (see NameSchema).
|
||||
@schemas: list of dictionaries containing schema field data (see FieldSchema).
|
||||
The data will typically be extracted from YAML, and is
|
||||
expected to be a list of dictionaries, each of which defines
|
||||
fields understood by the NameSchema class, to instantiate
|
||||
NameSchema objects. The result is a linked chain of schemas from
|
||||
fields understood by the FieldSchema class, to instantiate
|
||||
FieldSchema objects. The result is a linked chain of schemas from
|
||||
the top of the project tree down.
|
||||
|
||||
@start: if a start value is given, the top of the existing schema
|
||||
@ -769,7 +769,7 @@ class NameContext(object):
|
||||
else:
|
||||
last = None
|
||||
for schema in schemas:
|
||||
self.schemas.append(NameSchema(last, schema['rank'], schema=schema))
|
||||
self.schemas.append(FieldSchema(last, schema['rank'], schema=schema))
|
||||
#last = self.schemas[-1]
|
||||
|
||||
def _parse_words(self, wordtext):
|
||||
|
Binary file not shown.
@ -63,9 +63,9 @@ class FileContext_Utilities_Tests(unittest.TestCase):
|
||||
|
||||
class FileContext_NameSchema_Interface_Tests(unittest.TestCase):
|
||||
"""
|
||||
Test the interfaces presented by NameSchema.
|
||||
Test the interfaces presented by FieldSchema.
|
||||
|
||||
NameSchema is not really intended to be used from outside the
|
||||
FieldSchema is not really intended to be used from outside the
|
||||
file_context module, but it is critical to the behavior of the
|
||||
module, so I want to make sure it's working as expected.
|
||||
"""
|
||||
@ -89,58 +89,6 @@ class FileContext_NameSchema_Interface_Tests(unittest.TestCase):
|
||||
{'rank': 'shot', 'delimiter':'-', 'format':'{!s:s}'},
|
||||
{'rank': 'element', 'delimiter':'-', 'format':'{!s:s}'}]
|
||||
|
||||
def test_NameSchema_create_single(self):
|
||||
ns = file_context.NameSchema(schema = self.TESTSCHEMA_LIST[0])
|
||||
|
||||
# Test for ALL the expected properties:
|
||||
|
||||
# Set by the test schema
|
||||
self.assertEqual(ns.rank, 'project')
|
||||
self.assertEqual(ns.delimiter, '-')
|
||||
self.assertEqual(ns.format, '{:s}')
|
||||
self.assertEqual(ns.words, True)
|
||||
self.assertEqual(ns.codetype, str)
|
||||
|
||||
# Default values
|
||||
self.assertEqual(ns.pad, '0')
|
||||
self.assertEqual(ns.minlength, 1)
|
||||
self.assertEqual(ns.maxlength, 0)
|
||||
self.assertEqual(ns.default, None)
|
||||
|
||||
# Candidates for removal:
|
||||
self.assertEqual(ns.irank, 0) # Is this used at all?
|
||||
self.assertEqual(ns.parent, None)
|
||||
self.assertListEqual(list(ns.ranks),
|
||||
['series', 'episode', 'sequence',
|
||||
'block', 'camera', 'shot', 'element'])
|
||||
|
||||
def test_NameSchema_load_chain_from_project_yaml(self):
|
||||
with open(self.TESTPROJECTYAML, 'rt') as yaml_file:
|
||||
data = yaml.safe_load(yaml_file)
|
||||
schema_dicts = data['project_schema']
|
||||
|
||||
schema_chain = []
|
||||
last = None
|
||||
for schema_dict in schema_dicts:
|
||||
rank = schema_dict['rank']
|
||||
parent = last
|
||||
schema_chain.append(file_context.NameSchema(
|
||||
parent = parent,
|
||||
rank = rank,
|
||||
schema = schema_dict))
|
||||
last = schema_chain[-1]
|
||||
|
||||
#print( schema_chain )
|
||||
|
||||
self.assertEqual(len(schema_chain), 8)
|
||||
|
||||
self.assertEqual(
|
||||
schema_chain[-1].parent.parent.parent.parent.parent.parent.parent.rank,
|
||||
'project')
|
||||
|
||||
self.assertEqual(schema_chain[5].rank, 'camera')
|
||||
self.assertEqual(schema_chain[5].codetype[1], ('c2', 'c2', 'c2'))
|
||||
|
||||
|
||||
|
||||
|
||||
@ -218,7 +166,7 @@ class FileContext_Parser_UnitTests(unittest.TestCase):
|
||||
'A.001-LP-1-BeginningOfEnd-anim.txt')
|
||||
|
||||
def setUp(self):
|
||||
self.TESTSCHEMAS = [file_context.NameSchema( #rank=s['rank'],
|
||||
self.TESTSCHEMAS = [file_context.FieldSchema( #rank=s['rank'],
|
||||
schema=s)
|
||||
for s in self.TESTSCHEMA_LIST]
|
||||
|
||||
|
100
tests/test_name_schema.py
Normal file
100
tests/test_name_schema.py
Normal file
@ -0,0 +1,100 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test the file_context module.
|
||||
|
||||
This was written well after I wrote the module, and starts out as a conversion
|
||||
from the doctests I had in the module already.
|
||||
"""
|
||||
|
||||
|
||||
import unittest, os
|
||||
import yaml
|
||||
|
||||
import sys

# Make the package under test importable when this file is run directly
# from the source tree (tests/ lives one level below the project root).
# FIX: removed leftover debug print of __file__.
sys.path.append(os.path.normpath(os.path.join(__file__, '..', '..')))

from abx import name_schema
|
||||
class FileContext_NameSchema_Interface_Tests(unittest.TestCase):
    """
    Exercise the interfaces presented by FieldSchema.

    FieldSchema is primarily an internal building block of the
    file_context module, but the module's behavior hinges on it,
    so these tests pin its contract down.
    """
    # On-disk fixture locations:
    TESTDATA = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', 'testdata'))

    TESTPROJECTYAML = os.path.join(TESTDATA, 'myproject', 'myproject.yaml')

    TESTPATH = os.path.join(
        TESTDATA,
        'myproject/Episodes/A.001-Pilot/Seq/LP-LastPoint/A.001-LP-1-BeginningOfEnd-anim.txt')

    # Stand-in for the 'project_schema' list normally read from YAML:
    TESTSCHEMA_LIST = [
        {'rank': 'project', 'delimiter': '-', 'format': '{:s}', 'words': True},
        {'rank': 'series', 'delimiter': 'E', 'format': '{:2s}'},
        {'rank': 'episode', 'delimiter': '-', 'format': '{!s:>02s}'},
        {'rank': 'sequence', 'delimiter': '-', 'format': '{:2s}'},
        {'rank': 'block', 'delimiter': '-', 'format': '{!s:1s}'},
        {'rank': 'shot', 'delimiter': '-', 'format': '{!s:s}'},
        {'rank': 'element', 'delimiter': '-', 'format': '{!s:s}'}]

    def test_NameSchema_create_single(self):
        fs = name_schema.FieldSchema(schema=self.TESTSCHEMA_LIST[0])

        # Check every expected property.

        # Values taken directly from the test schema:
        self.assertEqual(fs.rank, 'project')
        self.assertEqual(fs.delimiter, '-')
        self.assertEqual(fs.format, '{:s}')
        self.assertEqual(fs.words, True)
        self.assertEqual(fs.codetype, str)

        # Defaults filled in by FieldSchema itself:
        self.assertEqual(fs.pad, '0')
        self.assertEqual(fs.minlength, 1)
        self.assertEqual(fs.maxlength, 0)
        self.assertEqual(fs.default, None)

        # Candidates for removal:
        self.assertEqual(fs.irank, 0)  # Is this used at all?
        self.assertEqual(fs.parent, None)
        self.assertListEqual(
            list(fs.ranks),
            ['series', 'episode', 'sequence',
             'block', 'camera', 'shot', 'element'])

    def test_NameSchema_load_chain_from_project_yaml(self):
        with open(self.TESTPROJECTYAML, 'rt') as fh:
            project_data = yaml.safe_load(fh)

        # Build the parent-linked chain from the YAML schema entries.
        chain = []
        prev = None
        for entry in project_data['project_schema']:
            prev = name_schema.FieldSchema(
                parent=prev,
                rank=entry['rank'],
                schema=entry)
            chain.append(prev)

        self.assertEqual(len(chain), 8)

        self.assertEqual(
            chain[-1].parent.parent.parent.parent.parent.parent.parent.rank,
            'project')

        self.assertEqual(chain[5].rank, 'camera')
        self.assertEqual(chain[5].codetype[1], ('c2', 'c2', 'c2'))
|
||||
|
||||
|
||||
|
@ -12,17 +12,17 @@ from abx import ranks
|
||||
|
||||
class BranchTests(unittest.TestCase):
|
||||
def test_trunk_branch(self):
|
||||
t = ranks.trunk.rank('')
|
||||
f = ranks.trunk.rank('file')
|
||||
s = ranks.trunk.rank('scene')
|
||||
self.assertEqual(repr(ranks.trunk), "<branch 'trunk': file, scene>")
|
||||
self.assertIn(t, ranks.trunk)
|
||||
self.assertIn(f, ranks.trunk)
|
||||
self.assertIn(s, ranks.trunk)
|
||||
t = ranks.Trunk.rank('')
|
||||
f = ranks.Trunk.rank('file')
|
||||
s = ranks.Trunk.rank('scene')
|
||||
self.assertEqual(repr(ranks.Trunk), "<branch 'Trunk': file, scene>")
|
||||
self.assertIn(t, ranks.Trunk)
|
||||
self.assertIn(f, ranks.Trunk)
|
||||
self.assertIn(s, ranks.Trunk)
|
||||
|
||||
|
||||
def test_defining_branch(self):
|
||||
b = ranks.Branch(ranks.trunk, 'myproject', 1,
|
||||
b = ranks.Branch(ranks.Trunk, 'myproject', 1,
|
||||
('project', 'series', 'episode', 'sequence',
|
||||
'block', 'shot', 'element'))
|
||||
|
||||
@ -30,7 +30,7 @@ class BranchTests(unittest.TestCase):
|
||||
|
||||
class RanksTests(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.b = ranks.Branch(ranks.trunk, 'myproject', 1,
|
||||
self.b = ranks.Branch(ranks.Trunk, 'myproject', 1,
|
||||
('project', 'series', 'episode', 'sequence',
|
||||
'block', 'shot', 'element'))
|
||||
|
||||
@ -115,12 +115,12 @@ class RanksTests(unittest.TestCase):
|
||||
pr = self.b.rank('project')
|
||||
|
||||
r = se - 1 # Normal - 'project' is one below 'series'
|
||||
s = se - 2 # ? Should this be 'project' or 'trunk'/None?
|
||||
s = se - 2 # ? Should this be 'project' or 'Trunk'/None?
|
||||
t = se - 3 # "` "
|
||||
|
||||
self.assertEqual(r, pr)
|
||||
self.assertEqual(s, ranks.trunk)
|
||||
self.assertEqual(t, ranks.trunk)
|
||||
self.assertEqual(s, ranks.Trunk)
|
||||
self.assertEqual(t, ranks.Trunk)
|
||||
|
||||
|
||||
def test_rank_slices_from_branch(self):
|
||||
@ -156,4 +156,14 @@ class RanksTests(unittest.TestCase):
|
||||
ranks.branch,
|
||||
self.b)
|
||||
|
||||
def test_using_rank_as_key(self):
|
||||
d = dict(zip(self.b.ranks, range(len(self.b.ranks))))
|
||||
R = self.b.rank
|
||||
|
||||
self.assertDictEqual(d, {
|
||||
R('trunk'):0, R('project'):1, R('series'):2, R('episode'):3,
|
||||
R('sequence'):4, R('block'):5, R('shot'):6, R('element'):7
|
||||
})
|
||||
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user