2002-10-20 19:55:16 +05:30
|
|
|
#
|
|
|
|
# Gramps - a GTK+/GNOME based genealogy program
|
|
|
|
#
|
2005-01-17 23:35:50 +05:30
|
|
|
# Copyright (C) 2000-2005 Donald N. Allingham
|
2002-10-20 19:55:16 +05:30
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation; either version 2 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program; if not, write to the Free Software
|
|
|
|
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
|
|
#
|
|
|
|
|
2004-06-28 18:50:33 +05:30
|
|
|
# $Id$
|
2003-10-23 21:03:57 +05:30
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
"Import from GEDCOM"
|
|
|
|
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
# standard python modules
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
import os
|
|
|
|
import re
|
|
|
|
import string
|
|
|
|
import const
|
|
|
|
import time
|
2006-01-23 09:39:20 +05:30
|
|
|
import logging
|
|
|
|
|
2005-02-28 07:21:21 +05:30
|
|
|
from gettext import gettext as _
|
2002-10-20 19:55:16 +05:30
|
|
|
|
2006-01-18 09:38:28 +05:30
|
|
|
# and module sets for earlier pythons
|
|
|
|
try:
|
|
|
|
set()
|
|
|
|
except NameError:
|
|
|
|
from sets import Set as set
|
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
# GTK/GNOME Modules
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
import gtk
|
|
|
|
import gtk.glade
|
|
|
|
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
# GRAMPS modules
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
2003-01-15 10:55:50 +05:30
|
|
|
import Errors
|
2003-01-10 11:09:40 +05:30
|
|
|
import RelLib
|
2002-10-20 19:55:16 +05:30
|
|
|
import Date
|
2004-09-17 09:00:04 +05:30
|
|
|
import DateParser
|
2005-01-09 07:48:49 +05:30
|
|
|
import DisplayTrace
|
2002-11-10 00:14:58 +05:30
|
|
|
from ansel_utf8 import ansel_to_utf8
|
2002-10-20 19:55:16 +05:30
|
|
|
import Utils
|
2005-01-27 08:48:21 +05:30
|
|
|
import GrampsMime
|
2006-01-24 03:18:34 +05:30
|
|
|
import logging
|
2005-12-06 12:08:09 +05:30
|
|
|
from bsddb import db
|
2005-12-21 16:57:05 +05:30
|
|
|
from _GedcomInfo import *
|
2006-01-18 09:38:28 +05:30
|
|
|
from _GedTokens import *
|
2005-12-06 12:08:09 +05:30
|
|
|
from QuestionDialog import ErrorDialog, WarningDialog
|
|
|
|
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
# latin/utf8 conversions
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
|
|
|
|
def utf8_to_latin(s):
    """Encode a unicode string as ISO-8859-1 bytes.

    Characters that have no Latin-1 equivalent are replaced with '?'
    rather than raising an error.
    """
    encoded = s.encode('iso-8859-1', 'replace')
    return encoded
|
|
|
|
|
|
|
|
def latin_to_utf8(s):
    """Return *s* as a unicode string.

    Unicode input is passed through untouched; byte strings are
    decoded as ISO-8859-1.
    """
    if type(s) == type(u''):
        # Already unicode -- nothing to convert.
        return s
    return unicode(s, 'iso-8859-1')
|
2002-10-20 19:55:16 +05:30
|
|
|
|
2006-01-24 03:18:34 +05:30
|
|
|
|
2006-01-24 04:06:34 +05:30
|
|
|
# Module-level logger used for all import warnings/errors in this file.
log = logging.getLogger('.GEDCOM_import')
|
2006-01-24 03:18:34 +05:30
|
|
|
|
2003-01-19 11:55:20 +05:30
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
# constants
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
2002-10-20 19:55:16 +05:30
|
|
|
# Character-set override codes selected by the encoding dialog:
# ANSEL -> convert via ansel_to_utf8, UNICODE -> convert via latin_to_utf8.
ANSEL = 1
UNICODE = 2
# NOTE(review): UPDATE looks like a progress-update interval; its use is
# not visible in this chunk -- confirm before relying on it.
UPDATE = 25
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def nocnv(s):
    """Identity character-set conversion: coerce *s* to unicode unchanged."""
    return unicode(s)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
2003-03-31 07:03:40 +05:30
|
|
|
# Mounted file-system types (from /proc/mounts) that are worth searching
# for media files referenced with DOS-style paths, mapped to a translated
# human-readable description.
file_systems = {
    'VFAT'    : _('Windows 9x file system'),
    'FAT'     : _('Windows 9x file system'),
    "NTFS"    : _('Windows NT file system'),
    "ISO9660" : _('CD ROM'),
    "SMBFS"   : _('Networked Windows file system')
    }
|
|
|
|
|
2006-01-20 01:25:58 +05:30
|
|
|
# Child-relationship types treated as ordinary (birth-like) links.
# NOTE(review): usage is not visible in this chunk -- verify against callers.
rel_types = ((RelLib.Person.CHILD_BIRTH,''),
             (RelLib.Person.CHILD_UNKNOWN,''),
             (RelLib.Person.CHILD_NONE,''))
|
2005-03-03 11:03:22 +05:30
|
|
|
|
2005-04-03 02:44:53 +05:30
|
|
|
# GEDCOM PEDI (pedigree) tag value -> GRAMPS child-relationship tuple.
pedi_type = {
    'birth'  : (RelLib.Person.CHILD_BIRTH,''),
    'natural': (RelLib.Person.CHILD_BIRTH,''),
    'adopted': (RelLib.Person.CHILD_ADOPTED,''),
    'foster' : (RelLib.Person.CHILD_FOSTER,''),
    }
|
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
# GEDCOM events to GRAMPS events conversion
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
# Invert GRAMPS' constant-event tables so a GEDCOM tag string can be
# looked up directly: tag -> GRAMPS event constant.  Empty tag strings
# (events with no GEDCOM equivalent) are skipped.
ged2gramps = {}
for _val in Utils.personalConstantEvents.keys():
    _key = Utils.personalConstantEvents[_val]
    if _key != "":
        ged2gramps[_key] = _val

# Same inversion for family events.
ged2fam = {}
for _val in Utils.familyConstantEvents.keys():
    _key = Utils.familyConstantEvents[_val]
    if _key != "":
        ged2fam[_key] = _val

# Non-standard family event tags; presumably filled in during parsing --
# the writer is not visible in this chunk.
ged2fam_custom = {}
|
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
# regular expressions
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
# A bare integer (with optional surrounding whitespace).
intRE = re.compile(r"\s*(\d+)\s*$")
# GEDCOM personal name: given name, /surname/, optional suffix.
nameRegexp= re.compile(r"/?([^/]*)(/([^/]*)(/([^/]*))?)?")
# Surname-only form: /surname/rest.
snameRegexp= re.compile(r"/([^/]*)/([^/]*)")
# Date with a calendar escape, e.g. "ABT @#DJULIAN@ 1 JAN 1800".
calRegexp = re.compile(r"\s*(ABT|BEF|AFT)?\s*@#D([^@]+)@\s*(.*)$")
# "BET <date> AND <date>" range, each side with a calendar escape.
rangeRegexp = re.compile(r"\s*BET\s+@#D([^@]+)@\s*(.*)\s+AND\s+@#D([^@]+)@\s*(.*)$")
# "FROM <date> TO <date>" span, each side with a calendar escape.
spanRegexp = re.compile(r"\s*FROM\s+@#D([^@]+)@\s*(.*)\s+TO\s+@#D([^@]+)@\s*(.*)$")
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
2006-01-22 07:36:46 +05:30
|
|
|
def importData(database, filename, callback=None, use_trans=False):
    """Entry point for a GEDCOM import.

    Sniffs the first 50 lines of the file for the character set (CHAR
    ANSEL) and the producing program (SOUR GRAMPS).  For ANSEL files not
    written by GRAMPS, a glade dialog asks the user which encoding to
    assume; the chosen index is passed on to import2() as the codeset
    override (None means no override).
    """
    f = open(filename,"r")

    ansel = False
    gramps = False
    # Only the header is of interest; stop after 50 lines or a blank read.
    for index in range(50):
        line = f.readline().split()
        if len(line) == 0:
            break
        if len(line) > 2 and line[1] == 'CHAR' and line[2] == "ANSEL":
            ansel = True
        if len(line) > 2 and line[1] == 'SOUR' and line[2] == "GRAMPS":
            gramps = True
    f.close()

    if not gramps and ansel:
        # ANSEL file from a foreign program: ask the user for the encoding.
        glade_file = "%s/gedcomimport.glade" % os.path.dirname(__file__)
        top = gtk.glade.XML(glade_file,'encoding','gramps')
        code = top.get_widget('codeset')
        code.set_active(0)
        dialog = top.get_widget('encoding')
        dialog.run()
        codeset = code.get_active()
        dialog.destroy()
    else:
        codeset = None
    import2(database, filename, callback, codeset, use_trans)
|
2004-09-25 03:35:46 +05:30
|
|
|
|
|
|
|
|
2006-01-18 09:38:28 +05:30
|
|
|
def import2(database, filename, callback, codeset, use_trans):
    """Run the actual import: pre-scan notes, then parse the file.

    Any failure is reported through an error dialog (or a traceback
    display for unexpected exceptions) and the function returns without
    raising.
    """
    # add some checking here
    try:
        # First pass collects NOTE records so forward references resolve.
        np = NoteParser(filename, False)
        g = GedcomParser(database,filename, callback, codeset, np.get_map(),
                         np.get_lines())
    except IOError,msg:
        ErrorDialog(_("%s could not be opened\n") % filename,str(msg))
        return
    except:
        DisplayTrace.DisplayTrace()
        return

    # Importing into an empty database: skip per-record transactions.
    if database.get_number_of_people() == 0:
        use_trans = False

    try:
        # NOTE(review): `close` is never used after this assignment.
        close = g.parse_gedcom_file(use_trans)
    except IOError,msg:
        errmsg = _("%s could not be opened\n") % filename
        ErrorDialog(errmsg,str(msg))
        return
    except Errors.GedcomError, val:
        (m1,m2) = val.messages()
        ErrorDialog(m1,m2)
        return
    except db.DBSecondaryBadError, msg:
        WarningDialog(_('Database corruption detected'),
                      _('A problem was detected with the database. Please '
                        'run the Check and Repair Database tool to fix the '
                        'problem.'))
        return
    except:
        DisplayTrace.DisplayTrace()
        return
|
2005-12-06 12:08:09 +05:30
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
class DateStruct:
    """Simple holder for a raw GEDCOM date/time string pair."""

    def __init__(self):
        # Both fields start empty and are filled in by the parser.
        self.date = self.time = ""
|
|
|
|
|
2004-12-26 23:53:50 +05:30
|
|
|
class GedcomDateParser(DateParser.DateParser):
    """Date parser specialized for GEDCOM input.

    Overrides the month table with the English three-letter
    abbreviations that the GEDCOM standard mandates.
    """

    # GEDCOM month abbreviation (lower-cased) -> month number.
    month_to_int = {
        'jan' : 1, 'feb' : 2, 'mar' : 3, 'apr' : 4,
        'may' : 5, 'jun' : 6, 'jul' : 7, 'aug' : 8,
        'sep' : 9, 'oct' : 10, 'nov' : 11, 'dec' : 12,
        }
|
|
|
|
|
2005-12-06 12:08:09 +05:30
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
# Start of a level-n NOTE record with an @xref@ id, e.g. "0 @N1@ NOTE text".
noteRE = re.compile(r"\s*\d+\s+\@(\S+)\@\s+NOTE(.*)$")
# CONT continuation line: text resumes on a new line.
contRE = re.compile(r"\s*\d+\s+CONT\s(.*)$")
# CONC continuation line: text is concatenated without a line break.
concRE = re.compile(r"\s*\d+\s+CONC\s(.*)$")
|
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
|
|
|
|
class CurrentState:
    """Per-person parse state: a note buffer, a name counter, and the
    person record currently being built."""

    def __init__(self):
        self.note = ""        # accumulated note text
        self.name_cnt = 0     # number of names seen so far
        self.person = None    # person under construction (set by the parser)

    def add_to_note(self, text):
        """Append *text* to the accumulated note."""
        self.note = self.note + text

    def get_text(self):
        """Return the note text accumulated so far."""
        return self.note
|
2005-12-06 12:08:09 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
2005-12-06 12:08:09 +05:30
|
|
|
class NoteParser:
    """First-pass scanner that collects all top-level NOTE records.

    Builds a map from the note's @xref@ id to a RelLib.Note object so
    that later passes can resolve forward references, and counts the
    total number of lines for progress reporting.
    """

    def __init__(self, filename,broken):
        # Maps "@xref@" -> RelLib.Note.
        self.name_map = {}

        self.count = 0
        f = open(filename,"rU")
        # True while we are inside a NOTE record and CONT/CONC lines
        # should be appended to the current note object.
        innote = False

        for line in f:
            self.count += 1
            if innote:
                match = contRE.match(line)
                if match:
                    # CONT: continuation on a new line.
                    noteobj.append("\n" + match.groups()[0])
                    continue

                match = concRE.match(line)
                if match:
                    if broken:
                        # Workaround for programs whose CONC drops a space.
                        noteobj.append(" " + match.groups()[0])
                    else:
                        noteobj.append(match.groups()[0])
                    continue
                # Any other line ends the note record.
                innote = False
            else:
                match = noteRE.match(line)
                if match:
                    data = match.groups()[0]
                    noteobj = RelLib.Note()
                    self.name_map["@%s@" % data] = noteobj
                    noteobj.append(match.groups()[1])
                    innote = True
        f.close()

    def get_map(self):
        """Return the @xref@ -> Note map."""
        return self.name_map

    def get_lines(self):
        """Return the total number of lines read from the file."""
        return self.count
|
|
|
|
|
2006-01-24 03:18:34 +05:30
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
# Reader - serves as the lexical analysis engine
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
class Reader:
    """Lexical analyzer for a GEDCOM file.

    Reads the file line by line, turns each line into a
    (level, token, value, tag) tuple, and folds CONT/CONC continuation
    lines into the record they extend, so callers see one tuple per
    logical record.
    """

    def __init__(self,name):
        self.f = open(name,'rU')
        # Lookahead buffer: newest entry at index 0, read() pops the oldest.
        self.current_list = []
        self.eof = False
        # Identity 256-byte translation table; delc is the set of control
        # characters (0-30) stripped from values.
        self.transtable = string.maketrans('','')
        self.delc = self.transtable[0:31]
        # Fallback table: ASCII passes through, high-bit bytes become '?'.
        self.transtable2 = self.transtable[0:128] + ('?' * 128)
        # Character-set conversion; replaced via set_charset_fn().
        self.cnv = lambda s: unicode(s)
        self.broken_conc = False

    def set_charset_fn(self,cnv):
        """Install the character-set conversion function (e.g. ANSEL->UTF-8)."""
        self.cnv = cnv

    def set_broken_conc(self,broken):
        """Enable the workaround for products that emit space-dropping CONC."""
        self.broken_conc = broken

    def read(self):
        """Return the next (level, token, value, tag) tuple, or None at EOF."""
        if len(self.current_list) <= 1 and not self.eof:
            self.readahead()
        try:
            return self.current_list.pop()
        except IndexError:
            # Buffer exhausted and the file is at EOF.
            return None

    def readahead(self):
        """Refill the lookahead buffer, merging CONT/CONC continuations."""
        while len(self.current_list) < 5:
            line = self.f.readline()
            if line == "":
                self.f.close()
                self.eof = True
                break
            line = line.rstrip('\r\n').split(None,2) + ['']

            # Strip control characters, then convert the character set.
            val = line[2].translate(self.transtable,self.delc)
            try:
                val = self.cnv(val)
            except:
                # BUG FIX: this previously called
                #   line[2].translate(val, self.transtable2)
                # with the arguments swapped (the value passed as the
                # 256-char table), which raised ValueError whenever the
                # converter failed.  Degrade gracefully instead by
                # replacing high-bit bytes with '?'.
                val = val.translate(self.transtable2)

            try:
                level = int(line[0])
            except:
                # Malformed level number: treat as top level.
                level = 0

            data = (level,tokens.get(line[1],TOKEN_UNKNOWN),val,line[1])

            if data[1] == TOKEN_CONT:
                # Continuation on a new line: extend the newest record.
                l = self.current_list[0]
                self.current_list[0] = (l[0],l[1],l[2]+'\n'+data[2],l[3])
            elif data[1] == TOKEN_CONC:
                # Concatenation: extend without a newline (optionally with a
                # space for products with broken CONC handling).
                l = self.current_list[0]
                if self.broken_conc:
                    new_value = "%s %s" % (l[2],data[2])
                else:
                    new_value = l[2] + data[2]
                self.current_list[0] = (l[0],l[1],new_value,l[3])
            else:
                self.current_list.insert(0,data)
|
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
class GedcomParser:
    """Parser that walks the token stream from Reader and builds GRAMPS
    database records (people, families, sources, places, media)."""

    # Legacy string "exception" markers.
    SyntaxError = "Syntax Error"
    BadFile = "Not a GEDCOM file"
2006-01-18 09:38:28 +05:30
|
|
|
    def __init__(self, dbase, filename, callback, codeset, note_map, lines):
        """Set up all parse state for one import run.

        dbase    -- target GRAMPS database
        filename -- GEDCOM file to import
        callback -- progress callback taking a percentage, or None
        codeset  -- character-set override (1=ANSEL, 2=Latin-1, else none)
        note_map -- @xref@ -> Note map from the NoteParser pre-scan
        lines    -- total line count (for progress percentage)
        """
        self.maxlines = lines
        self.callback = callback
        self.dp = GedcomDateParser()
        self.db = dbase
        self.person = None
        self.inline_srcs = {}
        self.media_map = {}
        self.fmap = {}
        self.smap = {}
        self.note_map = note_map
        self.refn = {}
        self.added = {}
        self.gedmap = GedcomInfoDB()
        self.gedsource = self.gedmap.get_from_source_tag('GEDCOM 5.5')
        # Default source record attached to everything imported.
        self.def_src = RelLib.Source()
        fname = os.path.basename(filename).split('\\')[-1]
        self.def_src.set_title(_("Import from %s") % unicode(fname))
        self.dir_path = os.path.dirname(filename)
        self.localref = 0
        self.placemap = {}
        # Producers known to emit broken CONC (space-dropping) records.
        self.broken_conc_list = [ 'FamilyOrigins', 'FTW' ]
        self.is_ftw = 0
        # GEDCOM id -> GRAMPS id/handle remapping tables.
        self.idswap = {}
        self.gid2id = {}
        self.sid2id = {}
        self.lid2id = {}
        self.fid2id = {}

        # Dispatch table for sub-records of a NAME line.
        # NOTE(review): TOKEN_ALIA appears twice with the same value; the
        # second entry silently overwrites the first.  One of them was
        # probably meant to be TOKEN__ALIA -- confirm against history.
        self.name_func = {
            TOKEN_ALIA : self.func_name_alia,
            TOKEN_ALIA : self.func_name_alia,
            TOKEN_NPFX : self.func_name_npfx,
            TOKEN_GIVN : self.func_name_givn,
            TOKEN_SPFX : self.func_name_spfx,
            TOKEN_SURN : self.func_name_surn,
            TOKEN__MARNM : self.func_name_marnm,
            TOKEN_TITL : self.func_name_titl,
            TOKEN_NSFX : self.func_name_nsfx,
            TOKEN_NICK : self.func_name_nick,
            TOKEN__AKA : self.func_name_aka,
            TOKEN_SOUR : self.func_name_sour,
            TOKEN_NOTE : self.func_name_note,
            }

        # Dispatch table for sub-records of an INDI record.
        self.person_func = {
            TOKEN_NAME : self.func_person_name,
            TOKEN_ALIA : self.func_person_alt_name,
            TOKEN__ALIA : self.func_person_alt_name,
            TOKEN_OBJE : self.func_person_object,
            TOKEN_NOTE : self.func_person_note,
            TOKEN__COMM : self.func_person_note,
            TOKEN_SEX : self.func_person_sex,
            TOKEN_BAPL : self.func_person_bapl,
            TOKEN_ENDL : self.func_person_endl,
            TOKEN_SLGC : self.func_person_slgc,
            TOKEN_FAMS : self.func_person_fams,
            TOKEN_FAMC : self.func_person_famc,
            TOKEN_RESI : self.func_person_resi,
            TOKEN_ADDR : self.func_person_addr,
            TOKEN_PHON : self.func_person_phon,
            TOKEN_BIRT : self.func_person_birt,
            TOKEN_ADOP : self.func_person_adop,
            TOKEN_DEAT : self.func_person_deat,
            TOKEN_EVEN : self.func_person_even,
            TOKEN_SOUR : self.func_person_sour,
            TOKEN_REFN : self.func_person_refn,
            TOKEN_AFN : self.func_person_attr,
            TOKEN_RFN : self.func_person_attr,
            TOKEN__UID : self.func_person_attr,
            TOKEN_CHAN : self.skip_record,
            TOKEN_ASSO : self.skip_record,
            TOKEN_ANCI : self.skip_record,
            TOKEN_DESI : self.skip_record,
            TOKEN_RIN : self.skip_record,
            TOKEN__TODO : self.skip_record,
            }

        # Names of places already in the database, to avoid duplicates.
        self.place_names = set()
        cursor = dbase.get_place_cursor()
        data = cursor.next()
        while data:
            (handle,val) = data
            self.place_names.add(val[2])
            data = cursor.next()
        cursor.close()

        self.lexer = Reader(filename)
        self.filename = filename
        self.index = 0
        self.backoff = 0
        self.override = codeset

        # Empty database: GEDCOM ids can be kept as-is; otherwise remap.
        if self.db.get_number_of_people() == 0:
            self.map_gid = self.map_gid_empty
        else:
            self.map_gid = self.map_gid_not_empty

        if self.override != 0:
            if self.override == 1:
                self.lexer.set_charset_fn(ansel_to_utf8)
            elif self.override == 2:
                self.lexer.set_charset_fn(latin_to_utf8)

        self.geddir = os.path.dirname(os.path.normpath(os.path.abspath(filename)))

        self.error_count = 0
        amap = Utils.personalConstantAttributes
        self.current = 0
        self.oldval = 0

        self.attrs = amap.values()
        # Inverted attribute table: GEDCOM tag -> GRAMPS attribute constant.
        self.gedattr = {}
        for val in amap.keys():
            self.gedattr[amap[val]] = val

        # Mount points of Windows/CD file systems, used by find_file()
        # to resolve DOS-style media paths.  Best-effort: /proc/mounts
        # only exists on Linux, so any failure is ignored.
        self.search_paths = []

        try:
            mypaths = []
            f = open("/proc/mounts","r")

            for line in f:
                paths = line.split()
                ftype = paths[2].upper()
                if ftype in file_systems.keys():
                    mypaths.append((paths[1],file_systems[ftype]))
                    self.search_paths.append(paths[1])
            f.close()
        except:
            pass
|
|
|
|
|
2003-02-10 09:41:01 +05:30
|
|
|
    def errmsg(self,msg):
        """Log an import error message."""
        log.warning(msg)
|
2003-02-10 09:41:01 +05:30
|
|
|
|
2003-03-31 07:03:40 +05:30
|
|
|
    def infomsg(self,msg):
        """Log an informational import message (same sink as errmsg)."""
        log.warning(msg)
|
2003-03-31 07:03:40 +05:30
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
def find_file(self,fullname,altpath):
|
2003-03-31 07:03:40 +05:30
|
|
|
tries = []
|
2004-10-23 09:26:48 +05:30
|
|
|
fullname = fullname.replace('\\','/')
|
2003-03-31 07:03:40 +05:30
|
|
|
tries.append(fullname)
|
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
if os.path.isfile(fullname):
|
2003-03-31 07:03:40 +05:30
|
|
|
return (1,fullname)
|
2004-11-24 09:25:25 +05:30
|
|
|
other = os.path.join(altpath,fullname)
|
|
|
|
tries.append(other)
|
|
|
|
if os.path.isfile(other):
|
|
|
|
return (1,other)
|
2002-10-20 19:55:16 +05:30
|
|
|
other = os.path.join(altpath,os.path.basename(fullname))
|
2003-03-31 07:03:40 +05:30
|
|
|
tries.append(other)
|
2002-10-20 19:55:16 +05:30
|
|
|
if os.path.isfile(other):
|
2003-03-31 07:03:40 +05:30
|
|
|
return (1,other)
|
2002-10-20 19:55:16 +05:30
|
|
|
if len(fullname) > 3:
|
|
|
|
if fullname[1] == ':':
|
|
|
|
fullname = fullname[2:]
|
|
|
|
for path in self.search_paths:
|
2003-03-31 07:03:40 +05:30
|
|
|
other = os.path.normpath("%s/%s" % (path,fullname))
|
|
|
|
tries.append(other)
|
2002-10-20 19:55:16 +05:30
|
|
|
if os.path.isfile(other):
|
2003-03-31 07:03:40 +05:30
|
|
|
return (1,other)
|
|
|
|
return (0,tries)
|
2002-10-20 19:55:16 +05:30
|
|
|
else:
|
2003-03-31 07:03:40 +05:30
|
|
|
return (0,tries)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
2006-01-23 06:57:46 +05:30
|
|
|
def track_lines(self):
|
|
|
|
self.current += 1
|
|
|
|
newval = int((100*self.current)/self.maxlines)
|
|
|
|
if self.callback and newval != self.oldval:
|
|
|
|
self.callback(newval)
|
|
|
|
self.oldval = newval
|
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
    def get_next(self):
        """Return the next (level, token, value, tag) tuple.

        If backup() was called since the last read, the previous tuple
        is returned again instead of consuming a new line.  At a
        premature EOF a sentinel tuple with level -1 is returned and the
        error is logged.
        """
        if self.backoff == 0:
            next_line = self.lexer.read()
            self.track_lines()

            # EOF ?
            if next_line == None:
                self.index += 1
                self.text = "";
                self.backoff = 0
                msg = _("Premature end of file at line %d.\n") % self.index
                self.errmsg(msg)
                self.error_count = self.error_count + 1
                # Sentinel: level -1 makes every "level < n" loop exit.
                self.groups = (-1, TOKEN_UNKNOWN, "","")
                return self.groups

            self.groups = next_line
            self.index += 1
        self.backoff = 0
        return self.groups
|
|
|
|
|
|
|
|
    def barf(self,level):
        """Report an ununderstood line, count the error, and skip its
        sub-records below *level*."""
        msg = _("Line %d was not understood, so it was ignored.") % self.index
        self.errmsg(msg)
        self.error_count = self.error_count + 1
        self.ignore_sub_junk(level)
|
|
|
|
|
|
|
|
def warn(self,msg):
|
2003-05-21 03:28:24 +05:30
|
|
|
self.errmsg(msg)
|
|
|
|
self.error_count = self.error_count + 1
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
    def backup(self):
        """Push the last tuple back so the next get_next() returns it again."""
        self.backoff = 1
|
|
|
|
|
2006-01-22 08:13:55 +05:30
|
|
|
    def parse_gedcom_file(self,use_trans=False):
        """Parse the whole file into the database inside one transaction.

        Signals are disabled for the duration and a single rebuild is
        requested at the end.  GedcomError from any parse stage is
        logged rather than propagated.
        """
        self.trans = self.db.transaction_begin("",not use_trans)
        #self.trans.set_batch(not use_trans)
        self.db.disable_signals()
        t = time.time()
        self.index = 0
        self.fam_count = 0
        self.indi_count = 0
        self.source_count = 0
        try:
            self.parse_header()
            self.parse_submitter()
            self.db.add_source(self.def_src,self.trans)
            self.parse_record()
            self.parse_trailer()
        except Errors.GedcomError, err:
            self.errmsg(str(err))

        # Materialize the source records created for inline SOUR data.
        for value in self.inline_srcs.keys():
            title,note = value
            handle = self.inline_srcs[value]
            src = RelLib.Source()
            src.set_handle(handle)
            src.set_title(title)
            if note:
                src.set_note(note)
            self.db.add_source(src,self.trans)

        t = time.time() - t
        # NOTE(review): msg is built but never logged or displayed.
        msg = _('Import Complete: %d seconds') % t

        if self.callback:
            self.callback(100)
        self.db.transaction_commit(self.trans,_("GEDCOM import"))
        self.db.enable_signals()
        self.db.request_rebuild()
|
2004-05-20 10:11:55 +05:30
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
    def parse_trailer(self):
        """Consume the TRLR record; complain if the file ends with
        anything else (level -1 means EOF was already reported)."""
        matches = self.get_next()
        if matches[0] >= 0 and matches[1] != TOKEN_TRLR:
            self.barf(0)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
    def parse_header(self):
        """Parse the HEAD record: the HEAD line itself, then its SOUR data."""
        self.parse_header_head()
        self.parse_header_source()
|
|
|
|
|
|
|
|
def parse_submitter(self):
|
2004-01-05 09:27:01 +05:30
|
|
|
matches = self.get_next()
|
2002-10-20 19:55:16 +05:30
|
|
|
if matches[2] != "SUBM":
|
|
|
|
self.backup()
|
2004-01-05 09:27:01 +05:30
|
|
|
return
|
2002-10-20 19:55:16 +05:30
|
|
|
else:
|
2004-11-27 05:01:50 +05:30
|
|
|
self.parse_submitter_data(1)
|
|
|
|
|
|
|
|
    def parse_submitter_data(self,level):
        """Read SUBM sub-records at *level*: the submitter NAME becomes the
        author of the default source; addresses are skipped."""
        while(1):
            matches = self.get_next()
            if int(matches[0]) < level:
                # Back at the parent level: record is done.
                self.backup()
                return
            elif matches[1] == TOKEN_NAME:
                self.def_src.set_author(matches[2])
            elif matches[1] == TOKEN_ADDR:
                self.ignore_sub_junk(level+1)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
    def parse_source(self,name,level):
        """Parse a SOUR record identified by @name@ at *level*.

        Known sub-tags fill in the source's fields; anything
        unrecognized is preserved by appending "TAG value" lines to the
        source's note so no data is silently dropped.
        """
        self.source = self.find_or_create_source(name[1:-1])
        note = ""
        while True:
            matches = self.get_next()
            if int(matches[0]) < level:
                # End of record: guarantee a title, commit, and back up.
                if not self.source.get_title():
                    self.source.set_title("No title - ID %s" % self.source.get_gramps_id())
                self.db.commit_source(self.source, self.trans)
                self.backup()
                return
            elif matches[1] == TOKEN_TITL:
                title = matches[2]
                title = title.replace('\n',' ')
                self.source.set_title(title)
            elif matches[1] in (TOKEN_TAXT,TOKEN_PERI): # EasyTree Sierra On-Line
                if self.source.get_title() == "":
                    title = matches[2]
                    title = title.replace('\n',' ')
                    self.source.set_title(title)
            elif matches[1] == TOKEN_AUTH:
                self.source.set_author(matches[2])
            elif matches[1] == TOKEN_PUBL:
                self.source.set_publication_info(matches[2])
            elif matches[1] == TOKEN_NOTE:
                note = self.parse_note(matches,self.source,level+1,note)
                self.source.set_note(note)
            elif matches[1] == TOKEN_TEXT:
                note = self.source.get_note()
                self.source.set_note(note.strip())
            elif matches[1] == TOKEN_ABBR:
                self.source.set_abbreviation(matches[2])
            elif matches[1] in (TOKEN_OBJE,TOKEN_CHAN,TOKEN__CAT):
                self.ignore_sub_junk(2)
            else:
                # Unknown tag: keep it as raw text in the note.
                note = self.source.get_note()
                if note:
                    note = "%s\n%s %s" % (note,matches[3],matches[2])
                else:
                    note = "%s %s" % (matches[3],matches[2])
                self.source.set_note(note.strip())
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_record(self):
    """Parse all level-0 GEDCOM records (FAM, INDI, SOUR, NOTE, ...).

    Loops over top-level records until a TRLR (trailer) record or a
    negative level is seen, dispatching each record type to the
    appropriate parser.  Unknown record types raise an error via barf().
    Side effects: creates/commits persons, families and sources in
    self.db within self.trans.
    """
    while True:
        matches = self.get_next()
        # matches appears to be (level, token, tag-text, remainder)
        if matches[2] == "FAM":
            self.fam_count = self.fam_count + 1
            # strip surrounding '@' from the xref id
            self.family = self.find_or_create_family(matches[3][1:-1])
            self.parse_family()
            if self.addr != None:
                # a family-level ADDR was seen: propagate the address to
                # the father, mother and every child of the family
                father_handle = self.family.get_father_handle()
                father = self.db.get_person_from_handle(father_handle)
                if father:
                    father.add_address(self.addr)
                    self.db.commit_person(father, self.trans)
                mother_handle = self.family.get_mother_handle()
                mother = self.db.get_person_from_handle(mother_handle)
                if mother:
                    mother.add_address(self.addr)
                    self.db.commit_person(mother, self.trans)
                for child_handle in self.family.get_child_handle_list():
                    child = self.db.get_person_from_handle(child_handle)
                    if child:
                        child.add_address(self.addr)
                        self.db.commit_person(child, self.trans)
            # NOTE(review): assumes self.def_src is always set; confirm a
            # default source is created unconditionally at import start
            if len(self.family.get_source_references()) == 0:
                sref = RelLib.SourceRef()
                sref.set_base_handle(self.def_src.get_handle())
                self.family.add_source_reference(sref)
            self.db.commit_family(self.family, self.trans)
            del self.family
        elif matches[2] == "INDI":
            self.indi_count = self.indi_count + 1
            gid = matches[3]
            gid = gid[1:-1]
            self.person = self.find_or_create_person(self.map_gid(gid))
            self.added[self.person.get_handle()] = 1
            self.parse_individual(self.person)
            if len(self.person.get_source_references()) == 0:
                sref = RelLib.SourceRef()
                sref.set_base_handle(self.def_src.get_handle())
                self.person.add_source_reference(sref)
            self.db.commit_person(self.person, self.trans)
            del self.person
        elif matches[2] in ["SUBM","SUBN","REPO"]:
            # submitter/submission/repository records are not imported
            self.ignore_sub_junk(1)
        elif matches[1] in (TOKEN_SUBM,TOKEN_SUBN,TOKEN_OBJE,TOKEN__EVENT_DEFN):
            self.ignore_sub_junk(1)
        elif matches[2] == "SOUR":
            self.parse_source(matches[3],1)
        elif matches[2].startswith("SOUR "):
            # A source formatted in a single line, for example:
            # 0 @S62@ SOUR This is the title of the source
            source = self.find_or_create_source(matches[3][1:-1])
            source.set_title( matches[2][5:])
            self.db.commit_source(source, self.trans)
        elif matches[2][0:4] == "NOTE":
            self.ignore_sub_junk(1)
        elif matches[2] == "_LOC":
            # TODO: Add support for extended Locations.
            # See: http://en.wiki.genealogy.net/index.php/Gedcom_5.5EL
            self.ignore_sub_junk(1)
        elif matches[0] < 0 or matches[1] == TOKEN_TRLR:
            # end of file (or trailer record): push the line back and stop
            self.backup()
            return
        else:
            self.barf(1)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
2004-10-19 08:49:25 +05:30
|
|
|
def map_gid_empty(self,gid):
    """Identity mapping: used when imported GRAMPS IDs are kept unchanged."""
    return gid
|
|
|
|
|
|
|
|
def map_gid_not_empty(self,gid):
    """Map an incoming GEDCOM id to a collision-free GRAMPS person id.

    The translation is cached in self.idswap; an id already present in
    the database gets a freshly allocated GRAMPS id, otherwise the id is
    kept as-is.
    """
    cached = self.idswap.get(gid)
    if cached:
        return cached
    # first time we see this id: allocate a replacement only when the id
    # is already taken in the target database
    if self.db.id_trans.get(str(gid)):
        replacement = self.db.find_next_person_gramps_id()
    else:
        replacement = gid
    self.idswap[gid] = replacement
    return replacement
|
2004-06-27 08:40:06 +05:30
|
|
|
|
2004-08-01 09:51:31 +05:30
|
|
|
def find_or_create_person(self,gramps_id):
    """Return a Person for *gramps_id*.

    If the handle already exists in the database the stored record is
    loaded; otherwise a fresh Person shell with a new handle is returned.
    """
    person = RelLib.Person()
    handle = self.gid2id.get(gramps_id)
    if self.db.has_person_handle(handle):
        # already in the database: rehydrate from the raw stored data
        person.unserialize(self.db.get_raw_person_data(handle))
    else:
        handle = self.find_person_handle(gramps_id)
        person.set_handle(handle)
        person.set_gramps_id(gramps_id)
    return person
|
|
|
|
|
2004-10-19 08:49:25 +05:30
|
|
|
def find_person_handle(self,gramps_id):
    """Return the internal handle for *gramps_id*, allocating one on first use."""
    handle = self.gid2id.get(gramps_id)
    if not handle:
        handle = create_id()
        self.gid2id[gramps_id] = handle
    return handle
|
|
|
|
|
|
|
|
def find_or_create_family(self,gramps_id):
    """Return a Family for *gramps_id*.

    Loads the stored record when the mapped handle is already in the
    database, otherwise returns an empty Family with a new handle.
    """
    family = RelLib.Family()
    handle = self.fid2id.get(gramps_id)
    if self.db.has_family_handle(handle):
        family.unserialize(self.db.get_raw_family_data(handle))
    else:
        handle = self.find_family_handle(gramps_id)
        family.set_handle(handle)
        family.set_gramps_id(gramps_id)
    return family
|
|
|
|
|
|
|
|
def find_family_handle(self,gramps_id):
    """Return the internal handle for family *gramps_id*, allocating one on first use."""
    handle = self.fid2id.get(gramps_id)
    if not handle:
        handle = create_id()
        self.fid2id[gramps_id] = handle
    return handle
|
|
|
|
|
2004-08-11 09:12:38 +05:30
|
|
|
def find_or_create_source(self,gramps_id):
    """Return a Source for *gramps_id*, creating and registering it if new.

    Unlike the person/family variants, a newly created source is added to
    the database immediately.
    """
    source = RelLib.Source()
    handle = self.sid2id.get(gramps_id)
    if self.db.has_source_handle(handle):
        source.unserialize(self.db.get_raw_source_data(handle))
    else:
        handle = create_id()
        source.set_handle(handle)
        source.set_gramps_id(gramps_id)
        self.db.add_source(source,self.trans)
        self.sid2id[gramps_id] = handle
    return source
|
|
|
|
|
2005-07-09 01:54:54 +05:30
|
|
|
def find_or_create_place(self,title):
    """Return a Place for *title*, creating one when not seen before.

    A new GRAMPS id is reserved for unseen titles; if the title clashes
    with a name already in the database, the id is appended to the title
    to keep place names unique.
    """
    place = RelLib.Place()

    # check to see if we've encountered this name before
    # if we haven't we need to get a new GRAMPS ID
    intid = self.lid2id.get(title)
    new_id = self.db.find_next_place_gramps_id() if intid == None else None

    # check to see if the name already existed in the database
    # if it does, create a new name by appending the GRAMPS ID.
    # generate a GRAMPS ID if needed
    if title in self.place_names:
        if not new_id:
            new_id = self.db.find_next_place_gramps_id()
        pname = "%s [%s]" % (title,new_id)
    else:
        pname = title

    if self.db.has_place_handle(intid):
        # place already stored: load it
        place.unserialize(self.db.get_raw_place_data(intid))
    else:
        # brand new place: register it and remember its handle
        intid = create_id()
        place.set_handle(intid)
        place.set_title(pname)
        place.set_gramps_id(new_id)
        self.db.add_place(place,self.trans)
        self.lid2id[title] = intid
    return place
|
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
def parse_cause(self,event,level):
    """Consume the sub-lines of a CAUS record, attaching SOUR citations to *event*."""
    while True:
        fields = self.get_next()
        if int(fields[0]) < level:
            # back at the parent level: push the line back and stop
            self.backup()
            return
        if fields[1] == TOKEN_SOUR:
            event.add_source_reference(self.handle_source(fields,level+1))
        else:
            self.barf(1)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_note_data(self,level):
    """Skip over the sub-records of a NOTE; nothing is retained."""
    while True:
        fields = self.get_next()
        if int(fields[0]) < level:
            self.backup()
            return
        if fields[1] in (TOKEN_SOUR,TOKEN_CHAN,TOKEN_REFN):
            self.ignore_sub_junk(level+1)
        elif fields[1] == TOKEN_RIN:
            # record id number: deliberately ignored
            pass
        else:
            self.barf(level+1)
|
|
|
|
|
|
|
|
def parse_ftw_relations(self,level):
    """Parse vendor-specific child relationship qualifiers under a CHIL link.

    Handles Family Tree Maker (_FREL/_MREL), ADOP, and Legacy (_STAT,
    _PREF) extensions.  Returns a (mrel, frel) pair of relationship
    tuples, defaulting to birth for both parents.
    """
    mrel = (RelLib.Person.CHILD_BIRTH,'')
    frel = (RelLib.Person.CHILD_BIRTH,'')

    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            return (mrel,frel)
        # FTW
        elif matches[1] == TOKEN__FREL:
            frel = pedi_type.get(matches[2].lower(),(RelLib.Person.CHILD_BIRTH,''))
        # FTW
        elif matches[1] == TOKEN__MREL:
            mrel = pedi_type.get(matches[2].lower(),(RelLib.Person.CHILD_BIRTH,''))
        elif matches[1] == TOKEN_ADOP:
            mrel = (RelLib.Person.CHILD_ADOPTED,'')
            frel = (RelLib.Person.CHILD_ADOPTED,'')
        # Legacy
        elif matches[1] == TOKEN__STAT:
            mrel = (RelLib.Person.CHILD_BIRTH,'')
            frel = (RelLib.Person.CHILD_BIRTH,'')
        # Legacy _PREF
        # NOTE(review): matches[1][0] indexes into the token, unlike the
        # direct comparisons elsewhere -- confirm token structure
        elif matches[1][0] == TOKEN_UNKNOWN:
            pass
        else:
            self.barf(level+1)
    # removed an unreachable trailing 'return None': the loop above can
    # only exit via the 'return (mrel,frel)' statement
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_family(self):
    """Parse the body of a level-0 FAM record into self.family.

    Consumes tokens until the next level-0 line, wiring up HUSB/WIFE/CHIL
    links, LDS sealings, addresses, attributes, sources, media and notes.
    Any unrecognized tag is treated as a family event.
    """
    self.addr = None
    note = ""
    while 1:
        matches = self.get_next()

        if int(matches[0]) < 1:
            # back at level 0: this FAM record is complete
            self.backup()
            return
        elif matches[1] == TOKEN_HUSB:
            gid = matches[2]
            # strip the surrounding '@' characters from the xref id
            handle = self.find_person_handle(self.map_gid(gid[1:-1]))
            self.family.set_father_handle(handle)
            self.ignore_sub_junk(2)
        elif matches[1] == TOKEN_WIFE:
            gid = matches[2]
            handle = self.find_person_handle(self.map_gid(gid[1:-1]))
            self.family.set_mother_handle(handle)
            self.ignore_sub_junk(2)
        elif matches[1] == TOKEN_SLGS:
            lds_ord = RelLib.LdsOrd()
            self.family.set_lds_sealing(lds_ord)
            self.parse_ord(lds_ord,2)
        elif matches[1] == TOKEN_ADDR:
            # remembered so parse_record can copy the address to all members
            self.addr = RelLib.Address()
            self.addr.set_street(matches[2])
            self.parse_address(self.addr,2)
        elif matches[1] == TOKEN_CHIL:
            mrel,frel = self.parse_ftw_relations(2)
            gid = matches[2]
            child = self.find_or_create_person(self.map_gid(gid[1:-1]))
            self.family.add_child_handle(child.get_handle())

            change = False

            # if the child already links to this family, update the
            # relationship types when they differ
            for f in child.get_parent_family_handle_list():
                if f[0] == self.family.get_handle():
                    if (mrel != f[1] or frel != f[2]):
                        change = True
                        child.change_parent_family_handle(self.family.get_handle(),
                                                          mrel, frel)
                    break
            else:
                # child not yet linked to this family
                change = True
                if mrel in rel_types and frel in rel_types:
                    child.set_main_parent_family_handle(self.family.get_handle())
                else:
                    if child.get_main_parents_family_handle() == self.family.handle:
                        child.set_main_parent_family_handle(None)
            if change:
                self.db.commit_person(child, self.trans)
        elif matches[1] == TOKEN_NCHI:
            a = RelLib.Attribute()
            a.set_type("Number of Children")
            a.set_value(matches[2])
            self.family.add_attribute(a)
        elif matches[1] == TOKEN_SOUR:
            source_ref = self.handle_source(matches,2)
            self.family.add_source_reference(source_ref)
        elif matches[1] in (TOKEN_RIN, TOKEN_SUBM, TOKEN_REFN,TOKEN_CHAN):
            self.ignore_sub_junk(2)
        elif matches[1] == TOKEN_OBJE:
            if matches[2] and matches[2][0] == '@':
                # media xref pointers are not supported here
                self.barf(2)
            else:
                self.parse_family_object(2)
        elif matches[1] == TOKEN__COMM:
            note = matches[2]
            self.family.set_note(note)
            self.ignore_sub_junk(2)
        elif matches[1] == TOKEN_NOTE:
            note = self.parse_note(matches,self.family,1,note)
        else:
            # any other tag is interpreted as a family event
            event = RelLib.Event()
            try:
                event.set_type((ged2fam[matches[3]],''))
            except:
                # BUG FIX: previously 'ged2fam_custom.has_key(matches[3])'
                # stored the boolean True as the custom event name; use
                # get() so the mapped string is used instead.
                val = ged2fam_custom.get(matches[3])
                if val:
                    event.set_type((RelLib.Event.CUSTOM,val))
                else:
                    event.set_type((RelLib.Event.CUSTOM,matches[1]))
            if event.get_type()[0] == RelLib.Event.MARRIAGE:
                self.family.set_relationship((RelLib.Family.MARRIED,''))
            self.parse_family_event(event,2)
            self.db.add_event(event,self.trans)

            event_ref = RelLib.EventRef()
            event_ref.set_reference_handle(event.handle)
            event_ref.set_role((RelLib.EventRef.PRIMARY,''))
            self.family.add_event_ref(event_ref)
            del event
|
2002-10-20 19:55:16 +05:30
|
|
|
|
2003-07-21 05:39:12 +05:30
|
|
|
def parse_note_base(self,matches,obj,level,old_note,task):
|
2002-10-20 19:55:16 +05:30
|
|
|
note = old_note
|
2005-12-06 12:08:09 +05:30
|
|
|
if matches[2] and matches[2][0] == "@": # reference to a named note defined elsewhere
|
|
|
|
note_obj = self.note_map.get(matches[2])
|
|
|
|
if note_obj:
|
|
|
|
return note_obj.get()
|
2002-10-20 19:55:16 +05:30
|
|
|
else:
|
2005-12-06 12:08:09 +05:30
|
|
|
return u""
|
2002-10-20 19:55:16 +05:30
|
|
|
else:
|
|
|
|
if old_note:
|
2006-01-24 03:18:34 +05:30
|
|
|
note = "%s\n%s%s" % (old_note,matches[2])
|
2002-10-20 19:55:16 +05:30
|
|
|
else:
|
2006-01-24 03:18:34 +05:30
|
|
|
note = matches[2]
|
2003-07-21 05:39:12 +05:30
|
|
|
task(note)
|
2002-10-20 19:55:16 +05:30
|
|
|
self.ignore_sub_junk(level+1)
|
|
|
|
return note
|
|
|
|
|
2003-07-21 05:39:12 +05:30
|
|
|
def parse_note(self,matches,obj,level,old_note):
    """Parse a NOTE record, storing the accumulated text via obj.set_note."""
    store = obj.set_note
    return self.parse_note_base(matches, obj, level, old_note, store)
|
2003-07-21 05:39:12 +05:30
|
|
|
|
|
|
|
def parse_comment(self,matches,obj,level,old_note):
    """Parse a comment record.

    NOTE(review): currently identical to parse_note (both route through
    obj.set_note) -- confirm whether a separate comment setter was intended.
    """
    sink = obj.set_note
    return self.parse_note_base(matches, obj, level, old_note, sink)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def parse_individual(self,person):
    """Drive parsing of one INDI record via the person_func dispatch table."""
    state = CurrentState()
    state.person = person

    while True:
        fields = self.get_next()

        if int(fields[0]) >= 1:
            # still inside this individual: dispatch on the token, with
            # unknown tokens falling back to the generic event handler
            handler = self.person_func.get(fields[1], self.func_person_event)
            handler(fields, state)
        else:
            # back at level 0: flush any accumulated note text and stop
            self.backup()
            if state.get_text():
                state.person.set_note(state.get_text())
            return
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_optional_note(self,level):
    """Collect an optional inline NOTE at *level*; returns the note text.

    Only inline note payloads are kept; '@'-style references to note
    records are skipped.
    """
    note = ""
    while 1:
        matches = self.get_next()

        if int(matches[0]) < level:
            self.backup()
            return note
        elif matches[1] == TOKEN_NOTE:
            # keep inline text; skip '@xref@' pointers to note records
            if not matches[2].strip() or matches[2] and matches[2][0] != "@":
                note = matches[2]
                self.parse_note_data(level+1)
            else:
                self.ignore_sub_junk(level+1)
        else:
            self.barf(level+1)
    # removed an unreachable trailing 'return None': the loop only exits
    # via 'return note'
|
|
|
|
|
2006-01-23 06:57:46 +05:30
|
|
|
def parse_famc_type(self,level,person):
    """Parse the sub-records of a FAMC (child-to-family) link.

    Returns a (ftype, note) pair: the pedigree relationship type and any
    inline note text.
    """
    ftype = (RelLib.Person.CHILD_BIRTH,'')
    note = ""
    while 1:
        matches = self.get_next()

        if int(matches[0]) < level:
            self.backup()
            return (ftype,note)
        elif matches[1] == TOKEN_PEDI:
            # NOTE(review): other call sites lower-case the key and default
            # to a (relation,'') tuple; confirm pedi_type keys/defaults here
            ftype = pedi_type.get(matches[2],RelLib.Person.UNKNOWN)
        elif matches[1] == TOKEN_SOUR:
            source_ref = self.handle_source(matches,level+1)
            person.get_primary_name().add_source_reference(source_ref)
        elif matches[1] == TOKEN__PRIMARY:
            pass #type = matches[1]
        elif matches[1] == TOKEN_NOTE:
            # keep inline text; skip '@xref@' pointers to note records
            if not matches[2].strip() or matches[2] and matches[2][0] != "@":
                note = matches[2]
                self.parse_note_data(level+1)
            else:
                self.ignore_sub_junk(level+1)
        else:
            self.barf(level+1)
    # removed an unreachable trailing 'return None': the loop only exits
    # via 'return (ftype,note)'
|
|
|
|
|
2006-01-23 06:57:46 +05:30
|
|
|
def parse_person_object(self,level,state):
    """Parse an OBJE (media object) record attached to a person.

    A FORM of "url" becomes a Url on the person; anything else is treated
    as a media file, registered in the database (deduplicated through
    self.media_map) and attached via a MediaRef.
    """
    form = ""
    filename = ""
    title = "no title"
    note = ""
    while True:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            break
        elif matches[1] == TOKEN_FORM:
            form = matches[2].lower()
        elif matches[1] == TOKEN_TITL:
            title = matches[2]
        elif matches[1] == TOKEN_FILE:
            filename = matches[2]
        elif matches[1] == TOKEN_NOTE:
            note = matches[2]
        elif matches[1] == TOKEN_UNKNOWN:
            self.ignore_sub_junk(level+1)
        else:
            self.barf(level+1)

    if form == "url":
        url = RelLib.Url()
        url.set_path(filename)
        url.set_description(title)
        state.person.add_url(url)
    else:
        (ok,path) = self.find_file(filename,self.dir_path)
        if not ok:
            self.warn(_("Could not import %s") % filename)
        # NOTE(review): the path returned by find_file is discarded and
        # recomputed from the raw filename -- confirm this is intended
        path = filename.replace('\\','/')
        photo_handle = self.media_map.get(path)
        if photo_handle == None:
            # first reference to this file: create the media object
            photo = RelLib.MediaObject()
            photo.set_path(path)
            photo.set_description(title)
            photo.set_mime_type(GrampsMime.get_type(os.path.abspath(path)))
            self.db.add_object(photo, self.trans)
            self.media_map[path] = photo.get_handle()
        else:
            photo = self.db.get_object_from_handle(photo_handle)
        oref = RelLib.MediaRef()
        oref.set_reference_handle(photo.get_handle())
        oref.set_note(note)
        state.person.add_media_reference(oref)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_family_object(self,level):
    """Parse an OBJE (media object) record attached to the current family.

    Collects FORM/TITL/FILE/NOTE values, then (when a FORM was present)
    registers the media file in the database (deduplicated through
    self.media_map) and attaches it to self.family via a MediaRef.
    """
    form = ""
    filename = ""
    title = ""
    note = ""
    while 1:
        matches = self.get_next()
        # NOTE(review): unlike parse_person_object, the tag checks run
        # before the level check -- a lower-level FORM/TITL/FILE/NOTE line
        # would be consumed here; confirm this ordering is intended
        if matches[1] == TOKEN_FORM:
            form = matches[2].lower()
        elif matches[1] == TOKEN_TITL:
            title = matches[2]
        elif matches[1] == TOKEN_FILE:
            filename = matches[2]
        elif matches[1] == TOKEN_NOTE:
            note = matches[2]
        elif int(matches[0]) < level:
            self.backup()
            break
        else:
            self.barf(level+1)

    if form:
        (ok,path) = self.find_file(filename,self.dir_path)
        if not ok:
            self.warn(_("Could not import %s") % filename)
        # NOTE(review): the path returned by find_file is discarded and
        # recomputed from the raw filename -- confirm this is intended
        path = filename.replace('\\','/')
        photo_handle = self.media_map.get(path)
        if photo_handle == None:
            # first reference to this file: create the media object
            photo = RelLib.MediaObject()
            photo.set_path(path)
            photo.set_description(title)
            photo.set_mime_type(GrampsMime.get_type(os.path.abspath(path)))
            self.db.add_object(photo, self.trans)
            self.media_map[path] = photo.get_handle()
        else:
            photo = self.db.get_object_from_handle(photo_handle)
        oref = RelLib.MediaRef()
        oref.set_reference_handle(photo.get_handle())
        oref.set_note(note)
        self.family.add_media_reference(oref)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_residence(self,address,level):
    """Parse a RESI (residence) record, filling in *address*."""
    note = ""
    while 1:
        matches = self.get_next()

        if int(matches[0]) < level:
            self.backup()
            return
        elif matches[1] == TOKEN_DATE:
            address.set_date_object(self.extract_date(matches[2]))
        elif matches[1] == TOKEN_ADDR:
            address.set_street(matches[2])
            self.parse_address(address,level+1)
        elif matches[1] in (TOKEN_AGE,TOKEN_AGNC,TOKEN_CAUS,TOKEN_STAT,
                            TOKEN_TEMP,TOKEN_OBJE,TOKEN_TYPE,TOKEN__DATE2):
            # residence sub-tags with no Address equivalent: skipped
            self.ignore_sub_junk(level+1)
        elif matches[1] == TOKEN_SOUR:
            address.add_source_reference(self.handle_source(matches,level+1))
        elif matches[1] == TOKEN_PLAC:
            # a PLAC under RESI is folded into the street field
            address.set_street(matches[2])
            self.parse_address(address,level+1)
        elif matches[1] == TOKEN_PHON:
            # a phone with no address line: use a placeholder street
            address.set_street("Unknown")
            address.set_phone(matches[2])
        elif matches[1] == TOKEN_NOTE:
            note = self.parse_note(matches,address,level+1,note)
        else:
            self.barf(level+1)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_address(self,address,level):
    """Parse ADDR sub-records (ADR1/ADR2/CITY/STAE/POST/CTRY/PHON) into *address*.

    Continuation address lines are joined onto the street field with
    commas.  A PHON line encountered at a shallower level is still
    applied before returning.
    """
    first = 0
    note = ""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            # a trailing PHON at the parent level still belongs to us
            if matches[1] == TOKEN_PHON:
                address.set_phone(matches[2])
            else:
                self.backup()
            return
        elif matches[1] in (TOKEN_ADDR, TOKEN_ADR1, TOKEN_ADR2):
            val = address.get_street()
            if first == 0:
                # first address line replaces whatever is there
                val = matches[2]
                first = 1
            else:
                # subsequent lines are appended, comma-separated
                val = "%s,%s" % (val,matches[2])
            address.set_street(val)
        elif matches[1] == TOKEN_CITY:
            address.set_city(matches[2])
        elif matches[1] == TOKEN_STAE:
            address.set_state(matches[2])
        elif matches[1] == TOKEN_POST:
            address.set_postal_code(matches[2])
        elif matches[1] == TOKEN_CTRY:
            address.set_country(matches[2])
        elif matches[1] == TOKEN_PHON:
            address.set_phone(matches[2])
        elif matches[1] == TOKEN_NOTE:
            note = self.parse_note(matches,address,level+1,note)
        elif matches[1] == TOKEN__LOC:
            pass # ignore unsupported extended location syntax
        elif matches[1] == TOKEN__NAME:
            pass # ignore
        else:
            self.barf(level+1)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
2004-10-06 09:12:54 +05:30
|
|
|
def parse_ord(self,lds_ord,level):
    """Parse an LDS ordinance record (temple, date, family, place, status)."""
    note = ""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            break
        elif matches[1] == TOKEN_TEMP:
            value = extract_temple(matches)
            if value:
                lds_ord.set_temple(value)
        elif matches[1] == TOKEN_DATE:
            lds_ord.set_date_object(self.extract_date(matches[2]))
        elif matches[1] == TOKEN_FAMC:
            # strip surrounding '@' from the family xref id
            lds_ord.set_family_handle(self.find_family_handle(matches[2][1:-1]))
        elif matches[1] == TOKEN_PLAC:
            try:
                place = self.find_or_create_place(matches[2])
                place.set_title(matches[2])
                place_handle = place.get_handle()
                lds_ord.set_place_handle(place_handle)
                self.ignore_sub_junk(level+1)
            except NameError:
                pass
        elif matches[1] == TOKEN_SOUR:
            lds_ord.add_source_reference(self.handle_source(matches,level+1))
        elif matches[1] == TOKEN_NOTE:
            note = self.parse_note(matches,lds_ord,level+1,note)
        elif matches[1] == TOKEN_STAT:
            # only statuses present in the lds_status table are recorded
            if const.lds_status.has_key(matches[2]):
                lds_ord.set_status(const.lds_status[matches[2]])
        else:
            self.barf(level+1)
|
|
|
|
|
|
|
|
def parse_person_event(self,event,level):
    """Parse the sub-records of a personal event into *event*.

    Handles TYPE, DATE, PLAC, CAUS, SOUR, privacy, witnesses and notes.
    Accumulated NOTE/OFFI text is stored on the event when the record ends.
    """
    note = ""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            # event record finished: flush any collected note text
            if note:
                event.set_note(note)
            self.backup()
            break
        elif matches[1] == TOKEN_TYPE:
            if event.get_type() == (RelLib.Event.CUSTOM,""):
                # event type not yet resolved: map the TYPE value
                if ged2gramps.has_key(matches[2]):
                    name = (ged2gramps[matches[2]],'')
                else:
                    val = self.gedsource.tag2gramps(matches[2])
                    if val:
                        name = (RelLib.Event.CUSTOM,val)
                    else:
                        name = (RelLib.Event.CUSTOM,matches[3])
                event.set_type(name)
            else:
                # type already known: TYPE refines the description
                event.set_description(matches[2])
        elif matches[1] == TOKEN__PRIV and matches[2] == "Y":
            event.set_privacy(True)
        elif matches[1] == TOKEN_DATE:
            event.set_date_object(self.extract_date(matches[2]))
        elif matches[1] == TOKEN_SOUR:
            event.add_source_reference(self.handle_source(matches,level+1))
        elif matches[1] == TOKEN_PLAC:
            val = matches[2]
            n = event.get_type()
            if self.is_ftw and n[0] in [RelLib.Event.OCCUPATION,RelLib.Event.DEGREE]:
                # Family Tree Maker abuses PLAC for the description of
                # occupation/degree events
                event.set_description(val)
                self.ignore_sub_junk(level+1)
            else:
                place = self.find_or_create_place(val)
                place_handle = place.get_handle()
                place.set_title(matches[2])
                event.set_place_handle(place_handle)
                self.ignore_sub_junk(level+1)
        elif matches[1] == TOKEN_CAUS:
            info = matches[2]
            event.set_cause(info)
            self.parse_cause(event,level+1)
        elif matches[1] in (TOKEN_NOTE,TOKEN_OFFI):
            info = matches[2]
            if note == "":
                note = info
            else:
                # BUG FIX: previously 'note = "\n%s" % info' discarded the
                # text accumulated so far; append to it instead.
                note = "%s\n%s" % (note,info)
        elif matches[1] in (TOKEN__GODP, TOKEN__WITN, TOKEN__WTN):
            if matches[2][0] == "@":
                # witness given as an xref to a person record
                witness_handle = self.find_person_handle(self.map_gid(matches[2][1:-1]))
                witness = RelLib.Witness(RelLib.Event.ID,witness_handle)
            else:
                # witness given as a plain name
                witness = RelLib.Witness(RelLib.Event.NAME,matches[2])
            event.add_witness(witness)
            self.ignore_sub_junk(level+1)
        elif matches[1] in (TOKEN_RELI, TOKEN_TIME, TOKEN_ADDR,TOKEN_AGE,
                            TOKEN_AGNC,TOKEN_STAT,TOKEN_TEMP,TOKEN_OBJE,
                            TOKEN__DATE2):
            # recognized but unsupported sub-tags: skipped
            self.ignore_sub_junk(level+1)
        else:
            self.barf(level+1)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_adopt_event(self,event,level):
    """Parse the sub-records of an ADOP event into *event*.

    Consumes records until the GEDCOM level drops below *level*, then
    backs up one record so the caller can continue.  Accumulated NOTE
    text is attached to the event on exit.
    """
    note = ""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            if note != "":
                event.set_note(note)
            self.backup()
            break
        elif matches[1] == TOKEN_DATE:
            event.set_date_object(self.extract_date(matches[2]))
        elif matches[1] in (TOKEN_TIME,TOKEN_ADDR,TOKEN_AGE,TOKEN_AGNC,
                            TOKEN_STAT,TOKEN_TEMP,TOKEN_OBJE):
            # recognized but unsupported sub-records
            self.ignore_sub_junk(level+1)
        elif matches[1] == TOKEN_SOUR:
            event.add_source_reference(self.handle_source(matches,level+1))
        elif matches[1] == TOKEN_FAMC:
            # adoption into a family: replace any existing "main parents"
            # link with an adoptive-relationship link
            handle = self.find_family_handle(matches[2][1:-1])
            mrel,frel = self.parse_adopt_famc(level+1)
            if self.person.get_main_parents_family_handle() == handle:
                self.person.set_main_parent_family_handle(None)
            self.person.add_parent_family_handle(handle,mrel,frel)
        elif matches[1] == TOKEN_PLAC:
            val = matches[2]
            place = self.find_or_create_place(val)
            place_handle = place.get_handle()
            place.set_title(matches[2])
            event.set_place_handle(place_handle)
            self.ignore_sub_junk(level+1)
        elif matches[1] == TOKEN_TYPE:
            # eventually do something intelligent here
            pass
        elif matches[1] == TOKEN_CAUS:
            info = matches[2]
            event.set_cause(info)
            self.parse_cause(event,level+1)
        elif matches[1] == TOKEN_NOTE:
            info = matches[2]
            if note == "":
                note = info
            else:
                # BUGFIX: previously "\n%s" % info, which discarded the
                # note text accumulated so far; append instead
                note = "%s\n%s" % (note,info)
        else:
            self.barf(level+1)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_adopt_famc(self,level):
    """Parse the FAMC sub-structure of an adoption event.

    Returns a (mother_relation, father_relation) pair.  Both default to
    adopted; an ADOP record naming HUSB or WIFE switches that parent's
    relation back to birth.
    """
    mother_rel = RelLib.Person.CHILD_ADOPTED
    father_rel = RelLib.Person.CHILD_ADOPTED
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            return (mother_rel,father_rel)
        elif matches[1] == TOKEN_ADOP:
            if matches[2] == "HUSB":
                mother_rel = RelLib.Person.CHILD_BIRTH
            elif matches[2] == "WIFE":
                father_rel = RelLib.Person.CHILD_BIRTH
        else:
            self.barf(level+1)
    return None    # unreachable: the loop only exits via return
|
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
def parse_person_attr(self,attr,level):
    """Parse the sub-records of a personal attribute into *attr*.

    Handles TYPE (attribute name resolution through the ged2gramps map
    or the active GEDCOM source profile), SOUR, PLAC and NOTE records;
    other known sub-records are skipped.
    """
    note = ""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            break
        elif matches[1] == TOKEN_TYPE:
            if attr.get_type() == "":
                # resolve the GEDCOM tag to a GRAMPS attribute name:
                # builtin map first, then source-specific map, else raw tag
                if ged2gramps.has_key(matches[2]):
                    name = ged2gramps[matches[2]]
                else:
                    val = self.gedsource.tag2gramps(matches[2])
                    if val:
                        name = val
                    else:
                        name = matches[2]
                attr.set_name(name)
        elif matches[1] in (TOKEN_CAUS,TOKEN_DATE,TOKEN_TIME,TOKEN_ADDR,
                            TOKEN_AGE,TOKEN_AGNC,TOKEN_STAT,TOKEN_TEMP,TOKEN_OBJE):
            self.ignore_sub_junk(level+1)
        elif matches[1] == TOKEN_SOUR:
            attr.add_source_reference(self.handle_source(matches,level+1))
        elif matches[1] == TOKEN_PLAC:
            val = matches[2]
            if attr.get_value() == "":
                attr.set_value(val)
            self.ignore_sub_junk(level+1)
        elif matches[1] == TOKEN_DATE:
            # NOTE(review): unreachable — TOKEN_DATE is already consumed by
            # the ignore tuple above; kept for fidelity, confirm intent
            note = "%s\n\n" % ("Date : %s" % matches[2])
        elif matches[1] == TOKEN_NOTE:
            info = matches[2]
            if note == "":
                note = info
            else:
                note = "%s\n\n%s" % (note,info)
        else:
            self.barf(level+1)
    if note != "":
        attr.set_note(note)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_family_event(self,event,level):
    """Parse the sub-records of a family event into *event*.

    Accumulates NOTE/OFFI text and attaches it on exit; TYPE records
    either refine a custom event type through the ged2fam map or are
    recorded as a "Status" note.
    """
    note = ""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            if note:
                event.set_note(note)
            self.backup()
            break
        elif matches[1] == TOKEN_TYPE:
            etype = event.get_type()
            if etype[0] == RelLib.Event.CUSTOM:
                # map the GEDCOM tag to a known family event if possible
                try:
                    event.set_type((ged2fam[matches[2]],''))
                except:
                    event.set_type((RelLib.Event.CUSTOM,matches[2]))
            else:
                note = 'Status = %s\n' % matches[2]
        elif matches[1] == TOKEN_DATE:
            event.set_date_object(self.extract_date(matches[2]))
        elif matches[1] == TOKEN_CAUS:
            info = matches[2]
            event.set_cause(info)
            self.parse_cause(event,level+1)
        elif matches[1] in (TOKEN_TIME,TOKEN_AGE,TOKEN_AGNC,TOKEN_ADDR,TOKEN_STAT,
                            TOKEN_TEMP,TOKEN_HUSB,TOKEN_WIFE,TOKEN_OBJE,TOKEN__CHUR):
            self.ignore_sub_junk(level+1)
        elif matches[1] == TOKEN_SOUR:
            event.add_source_reference(self.handle_source(matches,level+1))
        elif matches[1] == TOKEN_PLAC:
            val = matches[2]
            place = self.find_or_create_place(val)
            place_handle = place.get_handle()
            place.set_title(matches[2])
            event.set_place_handle(place_handle)
            self.ignore_sub_junk(level+1)
        elif matches[1] == TOKEN_OFFI:
            if note == "":
                note = matches[2]
            else:
                note = note + "\n" + matches[2]
        elif matches[1] == TOKEN_NOTE:
            note = self.parse_note(matches,event,level+1,note)
        elif matches[1] in (TOKEN__WITN, TOKEN__WTN):
            # witness: either a cross-reference (@...@) or a literal name
            if matches[2][0] == "@":
                witness_handle = self.find_person_handle(self.map_gid(matches[2][1:-1]))
                witness = RelLib.Witness(RelLib.Event.ID,witness_handle)
            else:
                witness = RelLib.Witness(RelLib.Event.NAME,matches[2])
            event.add_witness(witness)
            self.ignore_sub_junk(level+1)
        else:
            self.barf(level+1)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_source_reference(self,source,level):
    """Read the data associated with a SOUR reference into *source*."""
    note = ""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            source.set_note(note)
            self.backup()
            return
        elif matches[1] == TOKEN_PAGE:
            source.set_page(matches[2])
        elif matches[1] == TOKEN_DATE:
            source.set_date_object(self.extract_date(matches[2]))
        elif matches[1] == TOKEN_DATA:
            date,text = self.parse_source_data(level+1)
            if date:
                d = self.dp.parse(date)
                source.set_date_object(d)
            source.set_text(text)
        elif matches[1] in (TOKEN_OBJE,TOKEN_REFN):
            self.ignore_sub_junk(level+1)
        elif matches[1] == TOKEN_QUAY:
            try:
                val = int(matches[2])
            except ValueError:
                return
            # GEDCOM QUAY 0-3 -> confidence level; values above 1 are
            # shifted up by one to skip an intermediate level
            if val > 1:
                source.set_confidence_level(val+1)
            else:
                source.set_confidence_level(val)
        elif matches[1] in (TOKEN_NOTE,TOKEN_TEXT):
            note = self.parse_comment(matches,source,level+1,note)
        else:
            self.barf(level+1)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_source_data(self,level):
    """Parse a DATA sub-structure; return a (date, text) pair."""
    date = ""
    note = ""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            return (date,note)
        elif matches[1] == TOKEN_DATE:
            date = matches[2]
        elif matches[1] == TOKEN_TEXT:
            note = matches[2]
        else:
            self.barf(level+1)
    return None    # unreachable: the loop only exits via return
|
|
|
|
|
2003-07-21 05:39:12 +05:30
|
|
|
def parse_marnm(self,person,text):
    """Record a married name for *person* as an alternate name.

    A single word is treated as a surname grafted onto a copy of the
    primary name; multiple words are split into given name(s) + surname.
    """
    data = text.split()
    if len(data) == 1:
        # surname only: keep the rest of the primary name
        name = RelLib.Name(person.get_primary_name())
        name.set_surname(data[0])
        name.set_type((RelLib.Name.MARRIED,''))
        person.add_alternate_name(name)
    elif len(data) > 1:
        # last word is the surname, everything before it the given name
        name = RelLib.Name()
        name.set_surname(data[-1])
        name.set_first_name(' '.join(data[0:-1]))
        name.set_type((RelLib.Name.MARRIED,''))
        person.add_alternate_name(name)
|
2003-07-21 05:39:12 +05:30
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
def parse_header_head(self):
    """Validate that the input is a GEDCOM file (must start with HEAD)."""
    line = self.lexer.read()
    if line[1] != TOKEN_HEAD:
        raise Errors.GedcomError("%s is not a GEDCOM file" % self.filename)
    self.index += 1
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_header_source(self):
    """Parse the GEDCOM header, configuring the lexer and default source.

    Records the generating application (SOUR/VERS), original file name,
    copyright, character set, place format and creation date.
    """
    genby = ""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < 1:
            self.backup()
            return
        elif matches[1] == TOKEN_SOUR:
            # generating application: select the matching import profile
            self.gedsource = self.gedmap.get_from_source_tag(matches[2])
            self.lexer.set_broken_conc(self.gedsource.get_conc())
            if matches[2] == "FTW":
                self.is_ftw = 1
            genby = matches[2]
        elif matches[1] == TOKEN_NAME:
            pass
        elif matches[1] == TOKEN_VERS:
            self.def_src.set_data_item('Generated by',"%s %s" %
                                       (genby,matches[2]))
        elif matches[1] == TOKEN_FILE:
            # strip any DOS-style path components as well
            filename = os.path.basename(matches[2]).split('\\')[-1]
            self.def_src.set_title(_("Import from %s") % filename)
        elif matches[1] == TOKEN_COPR:
            self.def_src.set_publication_info(matches[2])
        elif matches[1] in (TOKEN_CORP,TOKEN_DATA,TOKEN_SUBM,TOKEN_SUBN,TOKEN_LANG):
            self.ignore_sub_junk(2)
        elif matches[1] == TOKEN_DEST:
            # only trust DEST when the file was generated by GRAMPS
            if genby == "GRAMPS":
                self.gedsource = self.gedmap.get_from_source_tag(matches[2])
                self.lexer.set_broken_conc(self.gedsource.get_conc())
        elif matches[1] == TOKEN_CHAR and not self.override:
            # configure the character-set translation unless overridden
            if matches[2] == "ANSEL":
                self.lexer.set_charset_fn(ansel_to_utf8)
            elif matches[2] not in ("UNICODE","UTF-8","UTF8"):
                self.lexer.set_charset_fn(latin_to_utf8)
            self.ignore_sub_junk(2)
        elif matches[1] == TOKEN_GEDC:
            self.ignore_sub_junk(2)
        elif matches[1] == TOKEN__SCHEMA:
            self.parse_ftw_schema(2)
        elif matches[1] == TOKEN_PLAC:
            self.parse_place_form(2)
        elif matches[1] == TOKEN_DATE:
            date = self.parse_date(2)
            date.date = matches[2]
            self.def_src.set_data_item('Creation date',matches[2])
        elif matches[1] == TOKEN_NOTE:
            note = matches[2]
        elif matches[1][0] == TOKEN_UNKNOWN:
            # NOTE(review): indexes matches[1][0], unlike the == tests
            # above — confirm the token representation supports this
            self.ignore_sub_junk(2)
        else:
            self.barf(2)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_ftw_schema(self,level):
    """Parse a Family Tree Maker _SCHEMA extension block."""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            return
        elif matches[1] == TOKEN_INDI:
            self.parse_ftw_indi_schema(level+1)
        elif matches[1] == TOKEN_FAM:
            self.parse_ftw_fam_schema(level+1)
        else:
            self.barf(2)
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def parse_ftw_indi_schema(self,level):
    """Register FTW individual-schema tags in the ged2gramps map."""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            return
        else:
            label = self.parse_label(level+1)
            ged2gramps[matches[1]] = label
|
|
|
|
|
|
|
|
def parse_label(self,level):
    """Return the LABL value of the current schema entry."""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            return
        elif matches[1] == TOKEN_LABL:
            return matches[2]
        else:
            self.barf(2)
    return None    # unreachable: the loop only exits via return
|
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
def parse_ftw_fam_schema(self,level):
    """Register FTW family-schema tags in the ged2fam map."""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            return
        else:
            label = self.parse_label(level+1)
            ged2fam[matches[3]] = label
    return None    # unreachable: the loop only exits via return
|
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
def ignore_sub_junk(self,level):
    """Skip all records nested at or below *level*."""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            return
    return    # unreachable: the loop only exits via return
|
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
def ignore_change_data(self,level):
    """Skip a CHAN (change date) record if one follows; otherwise back up."""
    matches = self.get_next()
    if matches[1] == TOKEN_CHAN:
        self.ignore_sub_junk(level+1)
    else:
        self.backup()
|
|
|
|
|
|
|
|
def parse_place_form(self,level):
    """Consume a PLAC.FORM sub-structure; barf on anything but FORM."""
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            return
        elif matches[1] != TOKEN_FORM:
            self.barf(level+1)
|
2004-10-06 09:12:54 +05:30
|
|
|
|
2002-10-20 19:55:16 +05:30
|
|
|
def parse_date(self,level):
    """Parse a DATE sub-structure, returning a DateStruct with any TIME."""
    date = DateStruct()
    while 1:
        matches = self.get_next()
        if int(matches[0]) < level:
            self.backup()
            return date
        elif matches[1] == TOKEN_TIME:
            date.time = matches[2]
        else:
            self.barf(level+1)
    return None    # unreachable: the loop only exits via return
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def extract_date(self,text):
    """Convert a GEDCOM date string into a Date object.

    Recognizes BET/AND ranges, FROM/TO spans and calendar escapes
    (FRENCH R, JULIAN, HEBREW); anything else is handed to the date
    parser directly.  On parser IOError, the raw text is preserved.
    """
    dateobj = Date.Date()
    try:
        # BET <date1> AND <date2> -> range
        match = rangeRegexp.match(text)
        if match:
            (cal1,data1,cal2,data2) = match.groups()
            if cal1 != cal2:
                # mixed calendars in one range are not handled
                pass
            cal = self._gedcom_calendar(cal1)
            start = self.dp.parse(data1)
            stop = self.dp.parse(data2)
            dateobj.set(Date.QUAL_NONE, Date.MOD_RANGE, cal,
                        start.get_start_date() + stop.get_start_date())
            return dateobj

        # FROM <date1> TO <date2> -> span
        match = spanRegexp.match(text)
        if match:
            (cal1,data1,cal2,data2) = match.groups()
            if cal1 != cal2:
                # mixed calendars in one span are not handled
                pass
            cal = self._gedcom_calendar(cal1)
            start = self.dp.parse(data1)
            stop = self.dp.parse(data2)
            dateobj.set(Date.QUAL_NONE, Date.MOD_SPAN, cal,
                        start.get_start_date() + stop.get_start_date())
            return dateobj

        # @#D<calendar>@ escape on a single date
        match = calRegexp.match(text)
        if match:
            (abt,cal,data) = match.groups()
            dateobj = self.dp.parse("%s %s" % (abt, data))
            if cal == "FRENCH R":
                dateobj.set_calendar(Date.CAL_FRENCH)
            elif cal == "JULIAN":
                dateobj.set_calendar(Date.CAL_JULIAN)
            elif cal == "HEBREW":
                dateobj.set_calendar(Date.CAL_HEBREW)
            return dateobj
        else:
            dval = self.dp.parse(text)
            return dval
    except IOError:
        return self.dp.set_text(text)

def _gedcom_calendar(self,name):
    """Map a GEDCOM calendar escape name to a Date calendar constant."""
    if name == "FRENCH R":
        return Date.CAL_FRENCH
    elif name == "JULIAN":
        return Date.CAL_JULIAN
    elif name == "HEBREW":
        return Date.CAL_HEBREW
    return Date.CAL_GREGORIAN
|
2002-10-20 19:55:16 +05:30
|
|
|
|
|
|
|
def handle_source(self,matches,level):
    """Build a SourceRef for a SOUR record (inline text or @xref@)."""
    source_ref = RelLib.SourceRef()
    if matches[2] and matches[2][0] != "@":
        # inline source: dedupe identical (title, note) pairs
        title = matches[2]
        note = ''
        handle = self.inline_srcs.get((title,note),Utils.create_id())
        self.inline_srcs[(title,note)] = handle
        self.ignore_sub_junk(level+1)
    else:
        # cross-reference: resolve (or create) the source record
        handle = self.find_or_create_source(matches[2][1:-1]).get_handle()
        self.parse_source_reference(source_ref,level)
    source_ref.set_base_handle(handle)
    return source_ref
|
|
|
|
|
|
|
|
def resolve_refns(self):
    """Re-key imported people by their REFN values.

    Currently disabled: the immediate return below short-circuits the
    whole routine, which is kept for reference.
    """
    return

    prefix = self.db.iprefix
    index = 0
    new_pmax = self.db.pmap_index
    for pid in self.added.keys():
        index = index + 1
        if self.refn.has_key(pid):
            val = self.refn[pid]
            new_key = prefix % val
            new_pmax = max(new_pmax,val)

            person = self.db.get_person_from_handle(pid,self.trans)

            # new ID is not used
            if not self.db.has_person_handle(new_key):
                self.db.remove_person(pid,self.trans)
                person.set_handle(new_key)
                person.set_gramps_id(new_key)
                self.db.add_person(person,self.trans)
            else:
                tp = self.db.get_person_from_handle(new_key,self.trans)
                # same person, just change it
                if person == tp:
                    self.db.remove_person(pid,self.trans)
                    person.set_handle(new_key)
                    person.set_gramps_id(new_key)
                    self.db.add_person(person,self.trans)
                # give up trying to use the refn as a key
                else:
                    pass

    self.db.pmap_index = new_pmax
|
2002-10-20 19:55:16 +05:30
|
|
|
|
2004-12-26 23:53:50 +05:30
|
|
|
def invert_year(self,subdate):
    """Return *subdate* with its year component (index 2) negated."""
    day, month, year, slash = subdate
    return (day, month, -year, slash)
|
2006-01-18 09:38:28 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
#--------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#--------------------------------------------------------------------
|
|
|
|
def func_person_name(self,matches,state):
    """Handle a NAME record: parse it and attach it to the person.

    The first NAME becomes the primary name; subsequent ones are
    alternates.
    """
    name = RelLib.Name()
    m = snameRegexp.match(matches[2])
    if m:
        # "surname, given" style
        (n,n2) = m.groups()
        names = (n2,'',n,'','')
    else:
        try:
            names = nameRegexp.match(matches[2]).groups()
        except:
            # unparseable: treat the whole string as the given name
            names = (matches[2],"","","","")
    if names[0]:
        name.set_first_name(names[0].strip())
    if names[2]:
        name.set_surname(names[2].strip())
    if names[4]:
        name.set_suffix(names[4].strip())

    if state.name_cnt == 0:
        state.person.set_primary_name(name)
    else:
        state.person.add_alternate_name(name)
    state.name_cnt += 1
    self.parse_name(name,2,state)
|
2006-01-18 09:38:28 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_alt_name(self,matches,state):
    """Handle an alternate-name record (_AKA etc.)."""
    aka = RelLib.Name()
    try:
        names = nameRegexp.match(matches[2]).groups()
    except:
        # unparseable: treat the whole string as the given name
        names = (matches[2],"","","","")
    if names[0]:
        aka.set_first_name(names[0])
    if names[2]:
        aka.set_surname(names[2])
    if names[4]:
        aka.set_suffix(names[4])
    state.person.add_alternate_name(aka)
|
2006-01-18 09:38:28 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_object(self,matches,state):
    """Handle an OBJE record; references (@...@) are not supported."""
    if matches[2] and matches[2][0] == '@':
        self.barf(2)
    else:
        self.parse_person_object(2,state)
|
2006-01-18 09:38:28 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_note(self,matches,state):
    """Handle a NOTE record on the person.

    NOTE(review): passes *state* where the commented-out ``self.note``
    suggests the accumulated note text was intended — confirm against
    parse_note's signature.
    """
    self.note = self.parse_note(matches,self.person,1,state)
|
2006-01-18 09:38:28 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_sex(self,matches,state):
    """Handle a SEX record; anything but M/F maps to unknown gender."""
    value = matches[2]
    if value == '':
        gender = RelLib.Person.UNKNOWN
    elif value[0] == "M":
        gender = RelLib.Person.MALE
    elif value[0] == "F":
        gender = RelLib.Person.FEMALE
    else:
        gender = RelLib.Person.UNKNOWN
    state.person.set_gender(gender)
|
2006-01-18 09:38:28 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_bapl(self,matches,state):
    """Handle a BAPL record (LDS baptism ordinance)."""
    lds_ord = RelLib.LdsOrd()
    state.person.set_lds_baptism(lds_ord)
    self.parse_ord(lds_ord,2)
|
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_endl(self,matches,state):
    """Handle an ENDL record (LDS endowment ordinance)."""
    lds_ord = RelLib.LdsOrd()
    state.person.set_lds_endowment(lds_ord)
    self.parse_ord(lds_ord,2)
|
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_slgc(self,matches,state):
    """Handle a SLGC record (LDS sealing-to-parents ordinance)."""
    lds_ord = RelLib.LdsOrd()
    state.person.set_lds_sealing(lds_ord)
    self.parse_ord(lds_ord,2)
|
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_fams(self,matches,state):
    """Handle a FAMS record: link the person as spouse in a family."""
    handle = self.find_family_handle(matches[2][1:-1])
    state.person.add_family_handle(handle)
    state.add_to_note(self.parse_optional_note(2))
|
2006-01-18 09:38:28 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_famc(self,matches,state):
    """Handle a FAMC record: link the person as child of a family.

    Skips the link if the family is already in the parent list;
    otherwise adds it with birth relations (when the pedigree type is
    a recognized relation) or with the parsed relation type.
    """
    ftype,note = self.parse_famc_type(2,state.person)
    handle = self.find_family_handle(matches[2][1:-1])

    # NOTE(review): iterates self.person while the rest of this handler
    # uses state.person — confirm these refer to the same object
    for f in self.person.get_parent_family_handle_list():
        if f[0] == handle:
            break
    else:
        if ftype in rel_types:
            state.person.add_parent_family_handle(
                handle, (RelLib.Person.CHILD_BIRTH,''),
                (RelLib.Person.CHILD_BIRTH,''))
        else:
            if state.person.get_main_parents_family_handle() == handle:
                state.person.set_main_parent_family_handle(None)
            state.person.add_parent_family_handle(handle,ftype,ftype)
|
2006-01-18 09:38:28 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_resi(self,matches,state):
    """Handle a RESI record: add a residence address to the person."""
    addr = RelLib.Address()
    state.person.add_address(addr)
    self.parse_residence(addr,2)
|
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_addr(self,matches,state):
    """Handle an ADDR record: build an address from the record value."""
    addr = RelLib.Address()
    addr.set_street(matches[2])
    self.parse_address(addr,2)
    state.person.add_address(addr)
|
2006-01-18 09:38:28 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_phon(self,matches,state):
    """Handle a PHON record: store the number on a placeholder address."""
    addr = RelLib.Address()
    addr.set_street("Unknown")
    addr.set_phone(matches[2])
    state.person.add_address(addr)
|
2006-01-18 09:38:28 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_birt(self,matches,state):
    """Handle a BIRT record.

    The first birth becomes the person's birth reference; any further
    BIRT records are kept as ordinary event references.
    """
    event = RelLib.Event()
    if matches[2]:
        event.set_description(matches[2])
    event.set_type((RelLib.Event.BIRTH,""))
    self.parse_person_event(event,2)

    self.db.add_event(event, self.trans)

    event_ref = RelLib.EventRef()
    event_ref.set_reference_handle(event.handle)
    event_ref.set_role((RelLib.EventRef.PRIMARY,''))

    if state.person.get_birth_ref():
        # already have a birth: keep this one as a plain event
        state.person.add_event_ref(event_ref)
    else:
        state.person.set_birth_ref(event_ref)
|
2006-01-18 09:38:28 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_adop(self,matches,state):
    """Handle an ADOP record: create an adoption event for the person."""
    event = RelLib.Event()
    event.set_type((RelLib.Event.ADOPT,''))
    self.parse_adopt_event(event,2)
    self.db.add_event(event, self.trans)

    event_ref = RelLib.EventRef()
    event_ref.set_reference_handle(event.handle)
    # BUGFIX: was set_rol(...), a typo that raised AttributeError;
    # every sibling handler (birt/deat/even) uses set_role
    event_ref.set_role((RelLib.EventRef.PRIMARY,''))
    state.person.add_event_ref(event_ref)
|
2006-01-22 07:36:46 +05:30
|
|
|
|
|
|
|
def func_person_deat(self,matches,state):
    """Handle a DEAT record.

    The first death becomes the person's death reference; any further
    DEAT records are kept as ordinary event references.
    """
    event = RelLib.Event()
    if matches[2]:
        event.set_description(matches[2])
    event.set_type((RelLib.Event.DEATH,""))
    self.parse_person_event(event,2)

    self.db.add_event(event, self.trans)

    event_ref = RelLib.EventRef()
    event_ref.set_reference_handle(event.handle)
    event_ref.set_role((RelLib.EventRef.PRIMARY,''))

    if state.person.get_death_ref():
        # already have a death: keep this one as a plain event
        state.person.add_event_ref(event_ref)
    else:
        state.person.set_death_ref(event_ref)
|
2006-01-18 09:38:28 +05:30
|
|
|
|
2006-01-22 07:36:46 +05:30
|
|
|
def func_person_even(self,matches,state):
    """Parse an EVEN (generic event) record for the current person.

    Custom events whose name matches a known attribute tag are stored
    as person attributes; everything else becomes a database event
    attached to the person with a primary event reference.
    """
    event = RelLib.Event()
    if matches[2]:
        event.set_description(matches[2])
    self.parse_person_event(event,2)
    # Type is a (numeric-type, name) tuple.
    (t,n) = event.get_type()
    if t == RelLib.Event.CUSTOM and n in self.attrs:
        # NOTE(review): membership is checked against self.attrs but
        # the lookup below uses self.gedattr — assumes the two mappings
        # stay in sync; a mismatch would raise KeyError.  TODO confirm.
        attr = RelLib.Attribute()
        attr.set_type((self.gedattr[n],''))
        attr.set_value(event.get_description())
        state.person.add_attribute(attr)
    else:
        self.db.add_event(event, self.trans)
        event_ref = RelLib.EventRef()
        event_ref.set_reference_handle(event.handle)
        event_ref.set_role((RelLib.EventRef.PRIMARY,''))
        state.person.add_event_ref(event_ref)
def func_person_sour(self,matches,state):
    """Attach a SOUR (source citation) record to the current person."""
    state.person.add_source_reference(self.handle_source(matches,2))
def func_person_refn(self,matches,state):
    """Parse a REFN (user reference number) record.

    Purely numeric REFN values are remembered in self.refn, keyed by
    the person's handle, for later reuse as Gramps IDs.  Non-numeric
    values are silently ignored.
    """
    if intRE.match(matches[2]):
        try:
            # Use the person carried in the parse state, consistent
            # with every other func_person_* handler (the old code
            # read self.person, which can be stale at this point).
            self.refn[state.person.get_handle()] = int(matches[2])
        except ValueError:
            # Value looked numeric but int() rejected it; drop it,
            # matching the original best-effort behavior.
            pass
def func_person_attr(self,matches,state):
    """Parse a personal attribute record and attach it to the person."""
    key = matches[3]
    attr_type = self.gedattr.get(key,RelLib.Attribute.CUSTOM)

    attr = RelLib.Attribute()
    # Unknown tags become custom attributes that keep the raw tag
    # name; known tags map to a builtin type with no extra label.
    if attr_type == RelLib.Attribute.CUSTOM:
        attr.set_type((attr_type,key))
    else:
        attr.set_type((attr_type,''))
    attr.set_value(matches[2])
    state.person.add_attribute(attr)
def func_person_event(self,matches,state):
    """Dispatch a generic person-level tag to an attribute or event.

    Tags listed in self.gedattr become person attributes.  All other
    tags become events: standard GEDCOM tags map through ged2gramps,
    anything else is stored under a custom event type.
    """
    tag = matches[3].strip()

    # Attribute-style tags never produce an event; handle and bail.
    if tag in self.gedattr:
        attr = RelLib.Attribute()
        attr.set_type((self.gedattr[tag],''))
        attr.set_value(matches[2])
        state.person.add_attribute(attr)
        self.parse_person_attr(attr,2)
        return

    event = RelLib.Event()
    if tag in ged2gramps:
        event.set_type((ged2gramps[tag],''))
    else:
        # Unknown tag: prefer the source-specific translation, fall
        # back to the raw tag text as the custom type name.
        val = self.gedsource.tag2gramps(tag)
        if val:
            event.set_type((RelLib.Event.CUSTOM,val))
        else:
            event.set_type((RelLib.Event.CUSTOM,tag))

    self.parse_person_event(event,2)
    if matches[2]:
        event.set_description(matches[2])
    self.db.add_event(event, self.trans)

    event_ref = RelLib.EventRef()
    event_ref.set_reference_handle(event.get_handle())
    event_ref.set_role((RelLib.EventRef.PRIMARY,''))
    state.person.add_event_ref(event_ref)
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
def parse_name(self,name,level,state):
    """Parse the sub-records of a NAME line into *name*.

    Reads lines until one appears at a shallower GEDCOM level than
    *level*, dispatching each tag through self.name_func.  Accumulated
    note text is attached to the name on exit, and the terminating
    line is pushed back for the caller to re-read.
    """
    # Child state shares the person but scopes note text and level
    # to this NAME subtree.
    sub_state = CurrentState()
    sub_state.person = state.person
    sub_state.name = name
    sub_state.level = level

    while True:
        matches = self.get_next()
        if int(matches[0]) < level:
            # End of the NAME subtree: flush notes, un-read the line.
            name.set_note(sub_state.get_text())
            self.backup()
            return
        else:
            # Unknown tags fall through to func_name_undefined.
            func = self.name_func.get(matches[1],self.func_name_undefined)
            func(matches,sub_state)
def func_name_undefined(self,matches,state):
    """Report an unrecognized tag encountered below a NAME record."""
    self.barf(state.level + 1)
def func_name_note(self,matches,state):
    """Parse a NOTE below a NAME record and accumulate its text."""
    text = self.parse_note(matches,state.name,state.level+1,state.note)
    state.add_to_note(text)
def func_name_alia(self,matches,state):
    """Parse an ALIA record into an alternate Name for the person."""
    aka = RelLib.Name()
    try:
        pieces = nameRegexp.match(matches[2]).groups()
    except:
        # Value did not match the "Given /Surname/ Suffix" pattern;
        # treat the whole string as a given name.
        pieces = (matches[2],"","","","")

    given, surname, suffix = pieces[0], pieces[2], pieces[4]
    if given:
        aka.set_first_name(given)
    if surname:
        aka.set_surname(surname)
    if suffix:
        aka.set_suffix(suffix)
    state.person.add_alternate_name(aka)
def func_name_npfx(self,matches,state):
    """Store the NPFX (name prefix) value as the name's title."""
    state.name.set_title(matches[2])
def func_name_givn(self,matches,state):
    """GIVN supplies the given (first) name."""
    state.name.set_first_name(matches[2])
def func_name_spfx(self,matches,state):
    """SPFX supplies the surname prefix (e.g. "van", "de")."""
    state.name.set_surname_prefix(matches[2])
def func_name_surn(self,matches,state):
    """SURN supplies the surname."""
    state.name.set_surname(matches[2])
def func_name_marnm(self,matches,state):
    """Hand a _MARNM (married name) value to the married-name parser."""
    self.parse_marnm(state.person,matches[2].strip())
def func_name_titl(self,matches,state):
    """Handle a TITL record below NAME.

    NOTE(review): the value is stored in the name's *suffix* field
    (not the title field, which NPFX uses) — presumably deliberate;
    confirm before changing.
    """
    state.name.set_suffix(matches[2])
def func_name_nsfx(self,matches,state):
    """NSFX sets the name suffix, but never overwrites an existing one."""
    current = state.name.get_suffix()
    if current == "":
        state.name.set_suffix(matches[2])
def func_name_nick(self,matches,state):
    """Store a nickname record on the person."""
    state.person.set_nick_name(matches[2])
def func_name_aka(self,matches,state):
    """Handle an AKA value below NAME.

    A single word is treated as a nickname; multiple words become an
    alternate name with the last word as surname and the rest as the
    given name.
    """
    words = matches[2].split()
    if len(words) == 1:
        state.person.set_nick_name(matches[2])
    else:
        alt = RelLib.Name()
        alt.set_surname(words[-1])
        alt.set_first_name(' '.join(words[:-1]))
        state.person.add_alternate_name(alt)
def func_name_sour(self,matches,state):
    """Attach a source citation to the name being parsed."""
    state.name.add_source_reference(
        self.handle_source(matches,state.level+1))
2006-01-23 07:25:29 +05:30
|
|
|
def skip_record(self,matches,state):
    """Consume and discard the sub-lines of an unwanted record."""
    self.ignore_sub_junk(2)
2005-02-28 07:21:21 +05:30
|
|
|
#-------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#-------------------------------------------------------------------------
|
2004-05-24 10:02:19 +05:30
|
|
|
def extract_temple(matches):
    """Map an LDS temple name from a GEDCOM line to its abbreviation.

    Tries the full value first, then the first word alone; returns
    None when neither lookup succeeds.
    """
    code = None
    try:
        value = matches[2]
        if value in const.lds_temple_to_abrev:
            code = const.lds_temple_to_abrev[value]
        else:
            code = const.lds_temple_to_abrev[value.split()[0]]
    except:
        # Best-effort lookup: any failure (missing field, empty
        # value, unknown temple) yields None, as before.
        pass
    return code
|
2004-10-23 09:26:48 +05:30
|
|
|
def create_id():
    """Return a fresh internal handle via the shared Utils helper."""
    return Utils.create_id()
2005-12-06 12:08:09 +05:30
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Standalone smoke test: parse the GEDCOM file named on the
    # command line into a scratch database and time the run.
    import sys
    import profile
    import const
    from GrampsDb import gramps_db_factory, gramps_db_reader_factory

    def callback(val):
        # Progress callback handed to the parser; just echo the value.
        print val

    codeset = None

    # Fresh writable scratch database in the working directory.
    db_class = gramps_db_factory(const.app_gramps)
    database = db_class()
    database.load("test.grdb",lambda x: None, mode="w")
    # Pre-scan the file for notes before the main parse.
    np = NoteParser(sys.argv[1],False)
    import time
    t = time.time()
    g = GedcomParser(database,sys.argv[1],callback, codeset, np.get_map(),np.get_lines())
    #profile.run('g.parse_gedcom_file(False)')
    g.parse_gedcom_file(False)
    print time.time() - t