Replaced parsers sub-package with parsers.py
After discovering that sub-packages inside Blender add-ons don't work reliably (this seems to be quite broken), I replaced the parsers sub-package with a single parsers.py module; the change amounts to little more than concatenating the source files from the package. I may remove this name-parsing system from ABX entirely soon, possibly moving it into KitCAT.
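The consolidated module keeps the registry pattern the sub-package used: each parser class registers itself in a module-level dict via a decorator, and FileContext simply does `from .parsers import NameParsers` (see the file_context.py hunk below). A minimal sketch of that pattern follows; the stub parser and example filename are illustrative only and stand in for the real classes and project data.

```python
# Sketch only: the registry and decorator mirror abx/parsers.py in this commit;
# the stub parser and example filename are not part of the real module.
NameParsers = {}  # Parser registry: maps parser.name -> parser class

def registered_parser(parser):
    """Decorator function to register a parser class."""
    NameParsers[parser.name] = parser
    return parser

@registered_parser
class StubParser:
    """Stand-in for Parser_ABX_Episode / Parser_ABX_Fallback / Parser_ABX_Schema."""
    name = 'stub'

    def __call__(self, filename, namepath):
        # The real parsers return (score, fields-dict) parsed from the filename.
        return 1.0, {'filetype': filename.rsplit('.', 1)[-1], 'title': filename}

# Callers pick a parser by name, instantiate it, and call it on a filename:
score, fields = NameParsers['stub']()('S1E01-PT-Compos.blend', namepath=[])
print(score, fields)  # -> 1.0 {'filetype': 'blend', 'title': 'S1E01-PT-Compos.blend'}
```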
parent 9db1245ab3
commit 5e24e06796
@@ -12,6 +12,8 @@ bl_info = {
    "category": "Object",
    }

blender_present = False
try:
    # These are protected so we can read the add-on metadata from my
@@ -58,7 +58,7 @@ definitions:
abx:
    render_profiles:
        previz:
            name: PreViz,
            name: PreViz
            desc: 'GL/AVI Previz Render for Animatics'
            engine: gl
            version: any

@@ -66,7 +66,7 @@ abx:
            fps_div: 1000
            fps_skip: 1
            suffix: GL
            format: AVI_JPEG
            format: AVI
            extension: avi
            freestyle: False

@@ -77,7 +77,7 @@ abx:
            fps: 30
            fps_skip: 3
            suffix: PT
            format: AVI_JPEG
            format: AVI
            extension: avi
            freestyle: False,
            antialias: False,

@@ -90,7 +90,7 @@ abx:
            fps: 30
            fps_skip: 30
            suffix: CH
            format: JPEG
            format: JPG
            extension: jpg
            framedigits: 5
            freestyle: True
243 abx/abx_ui.py
@@ -29,21 +29,10 @@ import bpy, bpy.utils, bpy.types, bpy.props
from bpy.app.handlers import persistent

from . import file_context

# if bpy.data.filepath:
#     BlendfileContext = file_context.FileContext(bpy.data.filepath)
# else:
#     BlendfileContext = file_context.FileContext()
#
# abx_data = BlendfileContext.abx_data

from . import copy_anim
from abx import ink_paint
from . import ink_paint
from . import render_profile

#configfile = os.path.join(os.path.dirname(__file__), 'config.yaml')

#print("Configuration file path: ", os.path.abspath(configfile))

# Lunatics Scene Panel
@@ -133,94 +122,9 @@ def get_seq_ids(self, context):
    seq_enum_items = [(s, s, seq_id_table[series,episode][s]) for s in seq_ids]
    return seq_enum_items

# Another hard-coded table -- for render profiles
render_profile_table = {
    'previz': {
        'name': 'PreViz',
        'desc': 'GL/AVI Previz Render for Animatics',
        'engine':'gl',
        'version':'any',
        'fps': 30,
        'fps_div': 1000,
        'fps_skip': 1,
        'suffix': 'GL',
        'format': 'AVI',
        'freestyle': False
        },

    'paint6': {
        'name': '6fps Paint',
        'desc': '6fps Simplified Paint-Only Render',
        'engine':'bi',
        'fps': 30,
        'fps_skip': 5,
        'suffix': 'P6',
        'format': 'AVI',
        'freestyle': False,
        'antialias': False,
        'motionblur': False
        },

    'paint3': {
        'name': '3fps Paint',
        'desc': '3fps Simplified Paint-Only Render',
        'engine': 'bi',
        'fps': 30,
        'fps_skip': 10,
        'suffix': 'P3',
        'format': 'AVI',
        'freestyle': False,
        'antialias': False,
        'motionblur': False,
        },

    'paint': {
        'name': '30fps Paint',
        'desc': '30fps Simplified Paint-Only Render',
        'engine': 'bi',
        'fps': 30,
        'fps_skip': 1,
        'suffix': 'PT',
        'format': 'AVI',
        'freestyle': False,
        'antialias': False,
        'motionblur': False
        },

    'check': {
        'name': '1fps Check',
        'desc': '1fps Full-Features Check Renders',
        'engine': 'bi',
        'fps': 30,
        'fps_skip': 30,
        'suffix': 'CH',
        'format': 'JPG',
        'framedigits': 5,
        'freestyle': True,
        'antialias': 8
        },

    'full': {
        'name': '30fps Full',
        'desc': 'Full Render with all Features Turned On',
        'engine': 'bi',
        'fps': 30,
        'fps_skip': 1,
        'suffix': '',
        'format': 'PNG',
        'framedigits': 5,
        'freestyle': True,
        'antialias': 8
        },
    }


class LunaticsSceneProperties(bpy.types.PropertyGroup):
class ProjectProperties(bpy.types.PropertyGroup):
    """
    Properties of the current scene.

    NOTE: due to be replaced by 'ProjectProperties', using the schema data
    retrieved by file_context.
    Properties of the scene (and file), based on project context information.
    """
    name_context_id = bpy.props.StringProperty(options={'HIDDEN', 'LIBRARY_EDITABLE'})
@@ -232,6 +136,101 @@ class LunaticsSceneProperties(bpy.types.PropertyGroup):
            name_context = BlendFile.new_name_context()
            self.name_context_id = str(id(name_context))
            return name_context

    render_folder = bpy.props.StringProperty(
        name = 'Render Folder',
        description = 'Path to the render folder (without filename)',
        subtype = 'FILE_PATH')

    render_prefix = bpy.props.StringProperty(
        name = 'Render Prefix',
        description = 'Prefix used to create filenames used in rendering',
        subtype = 'FILE_NAME')

    designation = bpy.props.StringProperty(
        name = 'Designation',
        description = 'Short code for this Blender scene only',
        maxlen=16)

    role = bpy.props.EnumProperty(
        name = 'Role',
        description = 'Role of this scene in project',
        items = (('cam', 'Camera', 'Camera direction and render to EXR'),
                 ('compos', 'Compositing', 'Post-compositing from EXR'),
                 ('anim', 'Animation', 'Character animation scene'),
                 ('mech', 'Mechanical', 'Mech animation scene'),
                 ('asset', 'Asset', 'Project model assets'),
                 ('prop', 'Prop', 'Stage property asset'),
                 ('char', 'Character', 'Character model asset'),
                 ('prac', 'Practical', 'Practical property - rigged prop')),
        default='cam')

    frame_start = bpy.props.IntProperty(
        name = 'Start',
        description = "Start frame of shot (used to set the render start frame)",
        soft_min = 0, soft_max=10000)

    frame_end = bpy.props.IntProperty(
        name = 'End',
        description = "End frame of shot (used to set the render end frame)",
        soft_min = 0, soft_max=10000)

    frame_rate = bpy.props.IntProperty(
        default = 30,
        name = 'FPS',
        description = "Frame rate for shot",
        soft_max = 30,
        min = 1, max = 120)

    ink = bpy.props.EnumProperty(
        items = (('FS', 'Freestyle', 'Uses Freestyle Ink'),
                 ('EN', 'Edge Node', 'Uses EdgeNode for Ink'),
                 ('FE', 'FS + EN', 'Uses both Freestyle & EdgeNode for Ink'),
                 ('NI', 'No Ink', 'Does not use ink (paint render used for final)'),
                 ('CU', 'Custom', 'Custom setup, do not touch ink settings')),
        default = 'CU',
        name = 'Ink Type',
        description = "Determines how ink will be handled in final shot render")

class ProjectPanel(bpy.types.Panel):
    """
    Add a panel to the Properties-Scene screen with Project Settings.
    """
    bl_idname = 'SCENE_PT_project'
    bl_label = 'Project Properties'
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'scene'

    def draw(self, context):
        pp = bpy.context.scene.project_properties
        self.layout.label(text='Project Properties')
        row = self.layout.row()
        row.prop(pp, 'render_folder')
        row = self.layout.row()
        row.prop(pp, 'render_prefix')
        row.prop(pp, 'designation')
        self.layout.label(text='Render Range')
        row = self.layout.row()
        row.prop(pp, 'frame_start')
        row.prop(pp, 'frame_end')
        row.prop(pp, 'frame_rate')
        self.layout.label(text='Extra')
        row = self.layout.row()
        row.prop(pp, 'role')
        row.prop(pp, 'ink')

# Buttons


class LunaticsSceneProperties(bpy.types.PropertyGroup):
    """
    Properties of the current scene.

    NOTE: due to be replaced by 'ProjectProperties', using the schema data
    retrieved by file_context.
    """

    series_id = bpy.props.EnumProperty(
        items=[
@@ -331,21 +330,7 @@ class LunaticsScenePanel(bpy.types.Panel):

# Buttons

class RenderProfileSettings(bpy.types.PropertyGroup):
    """
    Settings for Render Profiles control.

    NOTE: currently (0.2.6) uses hard-coded values. Planned to
    switch to project-defined values.
    """
    render_profile = bpy.props.EnumProperty(
        name='Profile',
        items=[(k, v['name'], v['desc'])
               for k,v in render_profile_table.items()],
        description="Select from pre-defined profiles of render settings",
        default='full')


class RenderProfilesOperator(bpy.types.Operator):
    """
@@ -357,9 +342,9 @@ class RenderProfilesOperator(bpy.types.Operator):

    def invoke(self, context, event):
        scene = context.scene
        profile = render_profile_table[scene.render_profile_settings.render_profile]
        profile = scene.render_profile_settings.render_profile

        render_profile.set_render_from_profile(scene, profile)
        BlendFile.render_profiles.apply(scene, profile)

        return {'FINISHED'}
@@ -525,13 +510,7 @@ class lunatics_compositing(bpy.types.Operator):
        shot.cfg_scene()

        return {'FINISHED'}

    # def draw(self, context):
    #     settings = context.scene.lx_compos_settings
    #     self.col = self.layout.col()
    #     col.prop(settings, "inkthru", text="Ink Thru")
    #     col.prop(settings, "billboards", text="Ink Thru")


class LunaticsPanel(bpy.types.Panel):
@@ -555,7 +534,17 @@ class LunaticsPanel(bpy.types.Panel):

BlendFile = file_context.FileContext()

class RenderProfileSettings(bpy.types.PropertyGroup):
    """
    Settings for Render Profiles control.
    """
    render_profile = bpy.props.EnumProperty(
        name='Profile',
        items=render_profile.blender_enum_lookup,
        description="Select from render profiles defined in project")


@persistent
def update_handler(ctxt):
    """
@@ -569,6 +558,10 @@ def register():
    bpy.types.Scene.lunaprops = bpy.props.PointerProperty(type=LunaticsSceneProperties)
    bpy.utils.register_class(LunaticsScenePanel)

    bpy.utils.register_class(ProjectProperties)
    bpy.types.Scene.project_properties = bpy.props.PointerProperty(type=ProjectProperties)
    bpy.utils.register_class(ProjectPanel)

    bpy.utils.register_class(RenderProfileSettings)
    bpy.types.Scene.render_profile_settings = bpy.props.PointerProperty(
        type=RenderProfileSettings)
@@ -593,6 +586,8 @@ def unregister():
    bpy.utils.unregister_class(LunaticsSceneProperties)
    bpy.utils.unregister_class(LunaticsScenePanel)

    bpy.utils.unregister_class(ProjectProperties)

    bpy.utils.unregister_class(RenderProfileSettings)
    bpy.utils.unregister_class(RenderProfilesOperator)
    bpy.utils.unregister_class(RenderProfilesPanel)
@@ -35,7 +35,7 @@ from .accumulate import RecursiveDict
from .enum import Enum
from .ranks import RankNotFound

from .parsers import NameParsers
from abx.parsers import NameParsers


log_level = Enum('DEBUG', 'INFO', 'WARNING', 'ERROR')
@@ -43,6 +43,8 @@ log_level = Enum('DEBUG', 'INFO', 'WARNING', 'ERROR')
from .name_schema import FieldSchema

from .name_context import NameContext

#from .render_profile import RenderProfileMap

class FileContext(NameContext):
    """
@@ -244,6 +246,7 @@ class FileContext(NameContext):
        # Defaults
        self.provided_data = RecursiveDict(DEFAULT_YAML, source='default')
        self.abx_fields = DEFAULT_YAML['abx']
        self.render_profiles = {} #RenderProfileMap()

    def clear_notes(self):
        """
@@ -294,6 +297,9 @@ class FileContext(NameContext):
        # Did we find the YAML data for the project?
        # Did we find the project root?

        self.render_profiles = self.abx_fields['render_profiles']
        #self.render_profiles = RenderProfileMap(self.abx_fields['render_profiles'])

        # TODO: Bug?
        # Note that 'project_schema' might not be correct if overrides are given.
        # As things are, I think it will simply append the overrides, and this
@@ -0,0 +1,490 @@
# parsers (sub-package)
"""
Filename Parsers & Registry for FileContext.
"""

import re, copy, os

import yaml

NameParsers = {} # Parser registry

def registered_parser(parser):
    """
    Decorator function to register a parser class.
    """
    NameParsers[parser.name] = parser
    return parser

wordre = re.compile(r'([A-Z][a-z]+|[a-z]+|[0-9]+|[A-Z][A-Z]+)')
@registered_parser
class Parser_ABX_Episode:
    """
    Original "Lunatics!" filename parsing algorithm. (DEPRECATED)

    This parser was written before the Schema parser. It hard-codes the schema used
    in the "Lunatics!" Project, and can probably be safely replaced by using the Schema
    parser with appropriate YAML settings in the <project>.yaml file, which also allows
    much more flexibility in naming schemes.

    YAML parameter settings available for this parser:

    ---
    definitions:
        parser: abx_episode # Force use of this parser

        parser_options: # Available settings (w/ defaults)
            field_separator: '-'
            episode_separator: 'E'
            filetype_separator: '.'

    Filetypes and roles are hard-code, and can't be changed from the YAML.

    Assumes field-based filenames of the form:

    <series>E<episode>[-<seq>[-<block>[-Cam<camera>][-<shot>]]][-<title>]-<role>.<filetype>

    Where the <field> indicates fields with fieldnames, and there are three expected separators:

    - is the 'field_separator'
    E is the 'episode_separator'
    . is the 'filetype_separator'

    (These can be overridden in the initialization).
    The class is callable, taking a string as input and returning a dictionary of fields.
    """
    name = 'abx_episode'

    max_score = 10 # Maximum number of fields parsed

    # supported values for filetype
    filetypes = {
        'blend': "Blender File",
        'kdenlive': "Kdenlive Video Editor File",
        'mlt': "Kdenlive Video Mix Script",
        'svg': "Scalable Vector Graphics (Inkscape)",
        'kra': "Krita Graphic File",
        'xcf': "Gimp Graphic File",
        'png': "Portable Network Graphics (PNG) Image",
        'jpg': "Joint Photographic Experts Group (JPEG) Image",
        'aup': "Audacity Project",
        'ardour': "Ardour Project",
        'flac': "Free Lossless Audio Codec (FLAC)",
        'mp3': "MPEG Audio Layer III (MP3) Audio File",
        'ogg': "Ogg Vorbis Audio File",
        'avi': "Audio Video Interleave (AVI) Video Container",
        'mkv': "Matroska Video Container",
        'mp4': "Moving Picture Experts Group (MPEG) 4 Format}",
        'txt': "Plain Text File"
        }

    # Roles that make sense in an episode context
    roles = {
        'extras': "Extras, crowds, auxillary animated movement",
        'mech': "Mechanical animation",
        'anim': "Character animation",
        'cam': "Camera direction",
        'vfx': "Visual special effects",
        'compos': "Compositing",
        'bkg': "Background 2D image",
        'bb': "Billboard 2D image",
        'tex': "Texture 2D image",
        'foley': "Foley sound",
        'voice': "Voice recording",
        'fx': "Sound effects",
        'music': "Music track",
        'cue': "Musical cue",
        'amb': "Ambient sound",
        'loop': "Ambient sound loop",
        'edit': "Video edit"
        }

    # A few filetypes imply their roles:
    roles_by_filetype = {
        'kdenlive': 'edit',
        'mlt': 'edit'
        }

    def __init__(self, field_separator='-', episode_separator='E', filetype_separator='.',
                 fields=None, filetypes=None, roles=None, **kwargs):
        if not fields:
            fields = {}
        if filetypes:
            self.filetypes = copy.deepcopy(self.filetypes) # Copy class attribute to instance
            self.filetypes.update(filetypes) # Update with new values
        if roles:
            self.roles = copy.deepcopy(self.roles) # Copy class attribute to instance
            self.roles.update(roles) # Update with new values
        self.field_separator = field_separator
        self.episode_separator = episode_separator
        self.filetype_separator = filetype_separator

    def __call__(self, filename, namepath):
        score = 0.0
        fielddata = {}

        # Check for filetype ending
        i_filetype = filename.rfind(self.filetype_separator)
        if i_filetype < 0:
            fielddata['filetype'] = None
        else:
            fielddata['filetype'] = filename[i_filetype+1:]
            filename = filename[:i_filetype]
            score = score + 1.0

        components = filename.split(self.field_separator)

        # Check for role marker in last component
        if components[-1] in self.roles:
            fielddata['role'] = components[-1]
            del components[-1]
            fielddata['hierarchy'] = 'episode'
            score = score + 2.0
        elif fielddata['filetype'] in self.roles_by_filetype:
            fielddata['role'] = self.roles_by_filetype[fielddata['filetype']]
            fielddata['hierarchy'] = 'episode'
        else:
            fielddata['role'] = None
            fielddata['hierarchy'] = None

        # Check for a descriptive title (must be 3+ characters in length)
        if components and len(components[-1])>2:
            # Normalize the title as words with spaces
            title = ' '.join(w for w in wordre.split(components[-1]) if wordre.fullmatch(w))
            del components[-1]
            score = score + 1.0
        else:
            title = None

        # Check if first field contains series/episode number
        if components:
            prefix = components[0]
            try:
                fielddata['series'] = {}
                fielddata['episode'] = {}
                fielddata['series']['code'], episode_id = prefix.split(self.episode_separator)
                fielddata['episode']['code'] = int(episode_id)
                fielddata['rank'] = 'episode'
                del components[0]
                score = score + 2.0
            except:
                pass

        # Check for sequence/block/shot/camera designations
        if components:
            fielddata['seq'] = {}
            fielddata['seq']['code'] = components[0]
            fielddata['rank'] = 'seq'
            del components[0]
            score = score + 1.0

        if components:
            try:
                fielddata['block'] = {}
                fielddata['block']['code'] = int(components[0])
                del components[0]
                fielddata['rank'] = 'block'
                score = score + 1.0
            except:
                pass

        if components and components[0].startswith('Cam'):
            fielddata['camera'] = {}
            fielddata['camera']['code'] = components[0][len('Cam'):]
            fielddata['rank'] = 'camera'
            del components[0]
            score = score + 1.0

        if components:
            # Any remaining structure is joined back to make the shot ID
            fielddata['shot'] = {}
            fielddata['shot']['code'] = ''.join(components)
            fielddata['rank'] = 'shot'
            components = None
            score = score + 1.0

        if title and fielddata['rank'] in fielddata:
            fielddata[fielddata['rank']]['title'] = title

        return score/self.max_score, fielddata


DEFAULT_YAML = {}
with open(os.path.join(os.path.dirname(__file__), 'abx.yaml')) as def_yaml_file:
    DEFAULT_YAML.update(yaml.safe_load(def_yaml_file))

@registered_parser
class Parser_ABX_Fallback(object):
    """
    Highly-tolerant parser to fall back on if others fail.

    The fallback parser makes only a very minimal and robust set of assumptions.

    Any legal filename will successfully return a simple parse, though much
    interpretation may be lost. It still allows for common field-based practices,
    but falls back on using the unaltered filename if necessary.

    YAML options available:

    ---
    definitions:
        parser: abx_fallback # Force use of this parser.

    There are no other options. Field separators are defined very broadly,
    and include most non-word characters (~#$!=+&_-). This was mostly designed
    to work without a project schema available.
    """
    name = 'abx_fallback'

    filetypes = DEFAULT_YAML['definitions']['filetypes']
    roles = DEFAULT_YAML['definitions']['roles']
    roles_by_filetype = (
        DEFAULT_YAML['definitions']['roles_by_filetype'])

    main_sep_re = re.compile(r'\W+') # Any single non-word char
    comment_sep_re = re.compile(r'[\W_][\W_]+|[~#$!=+&]+')


    def __init__(self, **kwargs):
        pass

    def _parse_ending(self, filename, separator):
        try:
            remainder, suffix = filename.rsplit(separator, 1)
            score = 1.0
        except ValueError:
            remainder = filename
            suffix = None
            score = 0.0
        return (suffix, remainder, score)

    def __call__(self, filename, namepath):
        fields = {}
        score = 1.0
        possible = 4.5

        split = filename.rsplit('.', 1)
        if len(split)<2 or split[1] not in self.filetypes:
            fields['filetype'] = None
            remainder = filename
            score += 1.0
        else:
            fields['filetype'] = split[1]
            remainder = split[0]

        comment_match = self.comment_sep_re.search(remainder)
        if comment_match:
            fields['comment'] = remainder[comment_match.end():]
            remainder = remainder[:comment_match.start()]
        else:
            fields['comment'] = None

        role = self.main_sep_re.split(remainder)[-1]
        if role in self.roles:
            fields['role'] = role
            remainder = remainder[:-1-len(role)]
            score += 1.0
        else:
            fields['role'] = None

        # Implied role
        if fields['filetype'] in self.roles_by_filetype:
            fields['role'] = self.roles_by_filetype[fields['filetype']]
            score += 1.0

        words = self.main_sep_re.split(remainder)
        fields['code'] = ''.join([w.capitalize() for w in words])
        fields['title'] = remainder

        return score/possible, fields

@registered_parser
class Parser_ABX_Schema(object):
    """
    Parser based on using the list of schemas.
    The schemas are normally defined in the project root directory YAML.

    Expands on the 'abx_episode' parser by allowing all the schema to
    be defined by outside configuration data (generally provided in a
    project YAML file, but this module does not depend on the data
    source used).

    The project YAML can additionally control parsing with this parser:

    ---
    definitions:
        parser: abx_schema # Force use of this parser

        parser_options: # Set parameters
            filetype_separator: '.'
            comment_separator: '--'
            role_separator: '-'
            title_separator: '-'

        filetypes: # Recognized filetypes.
            blend: Blender File # <filetype>: documentation
            ...

        roles: # Recognized role fields.
            anim: Character Animation # <role>: documentation
            ...

        roles_by_filetype: # Roles implied by filetype.
            kdenlive: edit # <filetype>:<role>
            ...

    (For the full default lists see abx/abx.yaml).

    schemas (list): The current schema-list defining how filenames should be parsed.
                    This "Schema" parser uses this to determine both parsing and
                    mapping of text fields in the filename.

    definitions(dict): The project definitions currently visible to the parser.
    """
    name = 'abx_schema'

    def __init__(self, schemas=None, definitions=None,
                 filetype_separator = '.',
                 comment_separator = '--',
                 role_separator = '-',
                 title_separator = '-',
                 **kwargs):

        self.filetype_separator = filetype_separator
        self.comment_separator = comment_separator
        self.role_separator = role_separator
        self.title_separator = title_separator

        self.schemas = schemas

        if 'roles' in definitions:
            self.roles = definitions['roles']
        else:
            self.roles = []

        if 'filetypes' in definitions:
            self.filetypes = definitions['filetypes']
        else:
            self.filetypes = []

        if 'roles_by_filetype' in definitions:
            self.roles_by_filetype = definitions['roles_by_filetype']
        else:
            self.roles_by_filetype = []

    def _parse_ending(self, filename, separator):
        try:
            remainder, suffix = filename.rsplit(separator, 1)
            score = 1.0
        except ValueError:
            remainder = filename
            suffix = None
            score = 0.0
        return (suffix, remainder, score)

    def _parse_beginning(self, filename, separator):
        try:
            prefix, remainder = filename.split(separator, 1)
            score = 1.0
        except ValueError:
            prefix = filename
            remainder = ''
            score = 0.0
        return (prefix, remainder, score)

    def __call__ (self, filename, namepath, debug=False):
        fields = {}
        score = 0.0
        possible = 0.0

        # First get specially-handled extensions
        remainder = filename
        field, newremainder, s = self._parse_ending(remainder, self.filetype_separator)
        if field and field in self.filetypes:
            remainder = newremainder
            fields['filetype'] = field
            score += s*1.0
        else:
            fields['filetype'] = None

        field, remainder, s = self._parse_ending(remainder, self.comment_separator)
        fields['comment'] = field
        score += s*0.5

        field, newremainder, s = self._parse_ending(remainder, self.role_separator)
        if field and field in self.roles:
            remainder = newremainder
            fields['role'] = field
            score += s*0.5
        else:
            fields['role'] = None

        field, remainder, s = self._parse_ending(remainder, self.title_separator)
        fields['title'] = field
        score += s*0.5

        possible += 3.0

        # Implicit roles
        if ( not fields['role'] and
             fields['filetype'] and
             fields['role'] in self.roles_by_filetype):
            self.role = self.roles_by_filetype[fields['filetype']]
            score += 0.2

        #possible += 0.2

        # Figure the rest out from the schema
        # Find the matching rank start position for the filename
        start = 0
        for start, (schema, name) in enumerate(zip(self.schemas, namepath)):
            field, r, s = self._parse_beginning(remainder, schema.delimiter)
            try:
                if field.lower() == schema.format.format(name).lower():
                    score += 1.0
                    break
            except ValueError:
                print(' (365) field, format', field, schema.format)

        possible += 1.0

        # Starting from that position, try to match fields
        # up to the end of the namepath (checking against it)
        irank = 0
        for irank, (schema, name) in enumerate(
                zip(self.schemas[start:], namepath[start:])):
            if not remainder: break
            field, remainder, s = self._parse_beginning(remainder, schema.delimiter)
            score += s
            try:
                if ( type(field) == str and
                     field.lower() == schema.format.format(name).lower()):
                    fields[schema.rank]={'code':field}
                    fields['rank'] = schema.rank
                    score += 1.0
            except ValueError:
                print(' (384) field, format', field, schema.format)
            possible += 2.0

        # Remaining fields are authoritative (doesn't affect score)
        for schema in self.schemas[irank:]:
            if not remainder: break
            field, remainder, s = self._parse_beginning(remainder, schema.delimiter)
            fields[schema.rank]={'code':field}
            fields['rank'] = schema.rank

        if 'rank' in fields:
            fields[fields['rank']]['title'] = fields['title']

        if not fields['role'] and fields['filetype'] in self.roles_by_filetype:
            fields['role'] = self.roles_by_filetype[fields['filetype']]

        return score/possible, fields

@@ -1,16 +0,0 @@
# parsers (sub-package)
"""
Filename Parsers & Registry for FileContext.
"""

NameParsers = {} # Parser registry

def registered_parser(parser):
    """
    Decorator function to register a parser class.
    """
    NameParsers[parser.name] = parser
    return parser

from . import abx_episode, abx_fallback, abx_schema
@@ -1,204 +0,0 @@
# abx_episode.py
"""
Custom parser written for "Lunatics!" Project Episode files.

Superseded by 'abx_schema' parser (probably).
"""

import re, copy

from . import registered_parser

wordre = re.compile(r'([A-Z][a-z]+|[a-z]+|[0-9]+|[A-Z][A-Z]+)')

@registered_parser
class Parser_ABX_Episode:
    """
    Original "Lunatics!" filename parsing algorithm. (DEPRECATED)

    This parser was written before the Schema parser. It hard-codes the schema used
    in the "Lunatics!" Project, and can probably be safely replaced by using the Schema
    parser with appropriate YAML settings in the <project>.yaml file, which also allows
    much more flexibility in naming schemes.

    YAML parameter settings available for this parser:

    ---
    definitions:
        parser: abx_episode # Force use of this parser

        parser_options: # Available settings (w/ defaults)
            field_separator: '-'
            episode_separator: 'E'
            filetype_separator: '.'

    Filetypes and roles are hard-code, and can't be changed from the YAML.

    Assumes field-based filenames of the form:

    <series>E<episode>[-<seq>[-<block>[-Cam<camera>][-<shot>]]][-<title>]-<role>.<filetype>

    Where the <field> indicates fields with fieldnames, and there are three expected separators:

    - is the 'field_separator'
    E is the 'episode_separator'
    . is the 'filetype_separator'

    (These can be overridden in the initialization).
    The class is callable, taking a string as input and returning a dictionary of fields.
    """
    name = 'abx_episode'

    max_score = 10 # Maximum number of fields parsed

    # supported values for filetype
    filetypes = {
        'blend': "Blender File",
        'kdenlive': "Kdenlive Video Editor File",
        'mlt': "Kdenlive Video Mix Script",
        'svg': "Scalable Vector Graphics (Inkscape)",
        'kra': "Krita Graphic File",
        'xcf': "Gimp Graphic File",
        'png': "Portable Network Graphics (PNG) Image",
        'jpg': "Joint Photographic Experts Group (JPEG) Image",
        'aup': "Audacity Project",
        'ardour': "Ardour Project",
        'flac': "Free Lossless Audio Codec (FLAC)",
        'mp3': "MPEG Audio Layer III (MP3) Audio File",
        'ogg': "Ogg Vorbis Audio File",
        'avi': "Audio Video Interleave (AVI) Video Container",
        'mkv': "Matroska Video Container",
        'mp4': "Moving Picture Experts Group (MPEG) 4 Format}",
        'txt': "Plain Text File"
        }

    # Roles that make sense in an episode context
    roles = {
        'extras': "Extras, crowds, auxillary animated movement",
        'mech': "Mechanical animation",
        'anim': "Character animation",
        'cam': "Camera direction",
        'vfx': "Visual special effects",
        'compos': "Compositing",
        'bkg': "Background 2D image",
        'bb': "Billboard 2D image",
        'tex': "Texture 2D image",
        'foley': "Foley sound",
        'voice': "Voice recording",
        'fx': "Sound effects",
        'music': "Music track",
        'cue': "Musical cue",
        'amb': "Ambient sound",
        'loop': "Ambient sound loop",
        'edit': "Video edit"
        }

    # A few filetypes imply their roles:
    roles_by_filetype = {
        'kdenlive': 'edit',
        'mlt': 'edit'
        }


    def __init__(self, field_separator='-', episode_separator='E', filetype_separator='.',
                 fields=None, filetypes=None, roles=None, **kwargs):
        if not fields:
            fields = {}
        if filetypes:
            self.filetypes = copy.deepcopy(self.filetypes) # Copy class attribute to instance
            self.filetypes.update(filetypes) # Update with new values
        if roles:
            self.roles = copy.deepcopy(self.roles) # Copy class attribute to instance
            self.roles.update(roles) # Update with new values
        self.field_separator = field_separator
        self.episode_separator = episode_separator
        self.filetype_separator = filetype_separator

    def __call__(self, filename, namepath):
        score = 0.0
        fielddata = {}

        # Check for filetype ending
        i_filetype = filename.rfind(self.filetype_separator)
        if i_filetype < 0:
            fielddata['filetype'] = None
        else:
            fielddata['filetype'] = filename[i_filetype+1:]
            filename = filename[:i_filetype]
            score = score + 1.0

        components = filename.split(self.field_separator)

        # Check for role marker in last component
        if components[-1] in self.roles:
            fielddata['role'] = components[-1]
            del components[-1]
            fielddata['hierarchy'] = 'episode'
            score = score + 2.0
        elif fielddata['filetype'] in self.roles_by_filetype:
            fielddata['role'] = self.roles_by_filetype[fielddata['filetype']]
            fielddata['hierarchy'] = 'episode'
        else:
            fielddata['role'] = None
            fielddata['hierarchy'] = None

        # Check for a descriptive title (must be 3+ characters in length)
        if components and len(components[-1])>2:
            # Normalize the title as words with spaces
            title = ' '.join(w for w in wordre.split(components[-1]) if wordre.fullmatch(w))
            del components[-1]
            score = score + 1.0
        else:
            title = None

        # Check if first field contains series/episode number
        if components:
            prefix = components[0]
            try:
                fielddata['series'] = {}
                fielddata['episode'] = {}
                fielddata['series']['code'], episode_id = prefix.split(self.episode_separator)
                fielddata['episode']['code'] = int(episode_id)
                fielddata['rank'] = 'episode'
                del components[0]
                score = score + 2.0
            except:
                pass

        # Check for sequence/block/shot/camera designations
        if components:
            fielddata['seq'] = {}
            fielddata['seq']['code'] = components[0]
            fielddata['rank'] = 'seq'
            del components[0]
            score = score + 1.0

        if components:
            try:
                fielddata['block'] = {}
                fielddata['block']['code'] = int(components[0])
                del components[0]
                fielddata['rank'] = 'block'
                score = score + 1.0
            except:
                pass

        if components and components[0].startswith('Cam'):
            fielddata['camera'] = {}
            fielddata['camera']['code'] = components[0][len('Cam'):]
            fielddata['rank'] = 'camera'
            del components[0]
            score = score + 1.0

        if components:
            # Any remaining structure is joined back to make the shot ID
            fielddata['shot'] = {}
            fielddata['shot']['code'] = ''.join(components)
            fielddata['rank'] = 'shot'
            components = None
            score = score + 1.0

        if title and fielddata['rank'] in fielddata:
            fielddata[fielddata['rank']]['title'] = title

        return score/self.max_score, fielddata
@@ -1,105 +0,0 @@
# abx_fallback.py
"""
Fallback parser used in case others fail.

The fallback parser makes only a very minimal and robust set of assumptions.

Any legal filename will successfully return a simple parse, though much
interpretation may be lost. It still allows for common field-based practices,
but falls back on using the unaltered filename if necessary.
"""

import re, os

import yaml

from . import registered_parser


DEFAULT_YAML = {}
with open(os.path.join(os.path.dirname(__file__), '..', 'abx.yaml')) as def_yaml_file:
    DEFAULT_YAML.update(yaml.safe_load(def_yaml_file))


@registered_parser
class Parser_ABX_Fallback(object):
    """
    Highly-tolerant parser to fall back on if others fail.

    Makes very minimal assumptions about filename structure.

    YAML options available:

    ---
    definitions:
        parser: abx_fallback # Force use of this parser.

    There are no other options. Field separators are defined very broadly,
    and include most non-word characters (~#$!=+&_-). This was mostly designed
    to work without a project schema available.
    """
    name = 'abx_fallback'

    filetypes = DEFAULT_YAML['definitions']['filetypes']
    roles = DEFAULT_YAML['definitions']['roles']
    roles_by_filetype = (
        DEFAULT_YAML['definitions']['roles_by_filetype'])

    main_sep_re = re.compile(r'\W+') # Any single non-word char
    comment_sep_re = re.compile(r'[\W_][\W_]+|[~#$!=+&]+')


    def __init__(self, **kwargs):
        pass

    def _parse_ending(self, filename, separator):
        try:
            remainder, suffix = filename.rsplit(separator, 1)
            score = 1.0
        except ValueError:
            remainder = filename
            suffix = None
            score = 0.0
        return (suffix, remainder, score)

    def __call__(self, filename, namepath):
        fields = {}
        score = 1.0
        possible = 4.5

        split = filename.rsplit('.', 1)
        if len(split)<2 or split[1] not in self.filetypes:
            fields['filetype'] = None
            remainder = filename
            score += 1.0
        else:
            fields['filetype'] = split[1]
            remainder = split[0]

        comment_match = self.comment_sep_re.search(remainder)
        if comment_match:
            fields['comment'] = remainder[comment_match.end():]
            remainder = remainder[:comment_match.start()]
        else:
            fields['comment'] = None

        role = self.main_sep_re.split(remainder)[-1]
        if role in self.roles:
            fields['role'] = role
            remainder = remainder[:-1-len(role)]
            score += 1.0
        else:
            fields['role'] = None

        # Implied role
        if fields['filetype'] in self.roles_by_filetype:
            fields['role'] = self.roles_by_filetype[fields['filetype']]
            score += 1.0

        words = self.main_sep_re.split(remainder)
        fields['code'] = ''.join([w.capitalize() for w in words])
        fields['title'] = remainder

        return score/possible, fields
@@ -1,189 +0,0 @@
# abx_schema.py
"""
Generalized fields-based parser based on provided schema.

Expands on the 'abx_episode' parser by allowing all the schema to
be defined by outside configuration data (generally provided in a
project YAML file, but this module does not depend on the data
source used).
"""

from . import registered_parser

@registered_parser
class Parser_ABX_Schema(object):
    """
    Parser based on using the list of schemas.
    The schemas are normally defined in the project root directory YAML.

    The project YAML can additionally control parsing with this parser:

    ---
    definitions:
        parser: abx_schema # Force use of this parser

        parser_options: # Set parameters
            filetype_separator: '.'
            comment_separator: '--'
            role_separator: '-'
            title_separator: '-'

        filetypes: # Recognized filetypes.
            blend: Blender File # <filetype>: documentation
            ...

        roles: # Recognized role fields.
            anim: Character Animation # <role>: documentation
            ...

        roles_by_filetype: # Roles implied by filetype.
            kdenlive: edit # <filetype>:<role>
            ...

    (For the full default lists see abx/abx.yaml).

    schemas (list): The current schema-list defining how filenames should be parsed.
                    This "Schema" parser uses this to determine both parsing and
                    mapping of text fields in the filename.

    definitions(dict): The project definitions currently visible to the parser.
    """
    name = 'abx_schema'

    def __init__(self, schemas=None, definitions=None,
                 filetype_separator = '.',
                 comment_separator = '--',
                 role_separator = '-',
                 title_separator = '-',
                 **kwargs):

        self.filetype_separator = filetype_separator
        self.comment_separator = comment_separator
        self.role_separator = role_separator
        self.title_separator = title_separator

        self.schemas = schemas

        if 'roles' in definitions:
            self.roles = definitions['roles']
        else:
            self.roles = []

        if 'filetypes' in definitions:
            self.filetypes = definitions['filetypes']
        else:
            self.filetypes = []

        if 'roles_by_filetype' in definitions:
            self.roles_by_filetype = definitions['roles_by_filetype']
        else:
            self.roles_by_filetype = []

    def _parse_ending(self, filename, separator):
        try:
            remainder, suffix = filename.rsplit(separator, 1)
            score = 1.0
        except ValueError:
            remainder = filename
            suffix = None
            score = 0.0
        return (suffix, remainder, score)

    def _parse_beginning(self, filename, separator):
        try:
            prefix, remainder = filename.split(separator, 1)
            score = 1.0
        except ValueError:
            prefix = filename
            remainder = ''
            score = 0.0
        return (prefix, remainder, score)

    def __call__ (self, filename, namepath, debug=False):
        fields = {}
        score = 0.0
        possible = 0.0

        # First get specially-handled extensions
        remainder = filename
        field, newremainder, s = self._parse_ending(remainder, self.filetype_separator)
        if field and field in self.filetypes:
            remainder = newremainder
            fields['filetype'] = field
            score += s*1.0
        else:
            fields['filetype'] = None

        field, remainder, s = self._parse_ending(remainder, self.comment_separator)
        fields['comment'] = field
        score += s*0.5

        field, newremainder, s = self._parse_ending(remainder, self.role_separator)
        if field and field in self.roles:
            remainder = newremainder
            fields['role'] = field
            score += s*0.5
        else:
            fields['role'] = None

        field, remainder, s = self._parse_ending(remainder, self.title_separator)
        fields['title'] = field
        score += s*0.5

        possible += 3.0

        # Implicit roles
        if ( not fields['role'] and
             fields['filetype'] and
             fields['role'] in self.roles_by_filetype):
            self.role = self.roles_by_filetype[fields['filetype']]
            score += 0.2

        #possible += 0.2

        # Figure the rest out from the schema
        # Find the matching rank start position for the filename
        start = 0
        for start, (schema, name) in enumerate(zip(self.schemas, namepath)):
            field, r, s = self._parse_beginning(remainder, schema.delimiter)
            try:
                if field.lower() == schema.format.format(name).lower():
                    score += 1.0
                    break
            except ValueError:
                print(' (365) field, format', field, schema.format)

        possible += 1.0

        # Starting from that position, try to match fields
        # up to the end of the namepath (checking against it)
        irank = 0
        for irank, (schema, name) in enumerate(
                zip(self.schemas[start:], namepath[start:])):
            if not remainder: break