Generate an index for a book
Question
Generate an index for a book. The input file consists of a set of index entries. Each line consists of the string IX:, followed by an index entry name enclosed in braces, followed by a page number, also enclosed in braces. Each ! in an index entry name represents a sub-level. A |( represents the start of a range, and a |) represents the end of the range. Occasionally, this range will be the same page. In that case, output only a single page number. Otherwise, do not collapse or expand ranges on your own. As an example, Figure 4.74 shows sample input, and Figure 4.75 shows the corresponding output.
IX: {Series |(} {2}
IX: {Series!geometric|(} {4}
IX: {Euler's constant} {4}
IX: {Series!geometric|)} {4}
IX: {Series!arithmetic|(} {4}
IX: {Series!arithmetic|)} {5}
IX: {Series!harmonic|(} {5}
IX: {Euler's constant} {5}
IX: {Series!harmonic|)} {5}
IX: {Series|)} {5}
Figure 4.74 Sample input
Euler's constant: 4, 5
Series: 2-5
    arithmetic: 4-5
    geometric: 4
    harmonic: 5
Figure 4.75 Sample output
Explanation / Answer
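The IX: format in the question can be handled directly with a small parser. The sketch below is one possible approach, not the posted answer; the names line_re, build_ix_index, and print_ix_index are illustrative. It assumes well-formed input lines, and it keeps duplicate single-page references as-is, since the exercise only addresses collapsing same-page ranges.

import re

line_re = re.compile(r'IX: \{(.+)\} \{(\d+)\}')

def build_ix_index(lines):
    entries = {}      # full entry path, e.g. 'Series!geometric' -> page strings
    open_ranges = {}  # entry path -> page on which a '|(' range opened
    for line in lines:
        match = line_re.match(line.strip())
        if not match:
            continue
        name, page = match.group(1), int(match.group(2))
        if name.endswith('|('):    # start of a range
            open_ranges[name[:-2]] = page
        elif name.endswith('|)'):  # end of a range
            key = name[:-2]
            start = open_ranges.pop(key)
            # a range that opens and closes on the same page collapses
            # to a single page number, as the problem statement requires
            text = str(start) if start == page else '%d-%d' % (start, page)
            entries.setdefault(key, []).append(text)
        else:                      # ordinary single-page reference
            entries.setdefault(name, []).append(str(page))
    return entries

def print_ix_index(entries):
    for key in sorted(entries, key=str.lower):
        depth = key.count('!')     # each '!' introduces one sub-level
        label = key.rsplit('!', 1)[-1]
        print('%s%s: %s' % ('    ' * depth, label, ', '.join(entries[key])))

Running the Figure 4.74 lines through build_ix_index and print_ix_index reproduces Figure 4.75. Keying the dictionary by the full 'topic!subtopic' path keeps each parent's sub-entries directly beneath it when sorted, since '!' sorts before letters.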
The script posted as the answer builds a book index from a different, plain-text input format (lines of the form 'topic pages [subtopic]' or 'topic see reference') rather than the IX: lines above:

#!/usr/bin/python2.6
# -*- coding: utf-8 -*-
"""Read page indices to build book index."""
# index should be between 3 and 9 pages
# 1. fix tricky names (e.g., van Rossum, Van Doren)

import codecs
import getopt
from optparse import OptionParser
import os
import re
import sys

ABBRV = (
    ('NPOV', 'Neutral Point of View (NPOV)'),
    ('IETF', 'Internet Engineering Task Force (IETF)'),
    ('ODF', 'Open Document Format (ODF)'),
    ('W3C', 'World Wide Web Consortium (W3C)'),
    ('OASIS', 'Organization for the Advancement of Structured Information Standards'),
)
BORING_WORDS = ('', 'a', 'also', 'of', 'if', 'in', 'an', 'to', 'for', 'the', 'and', 're')

# Common first names, used to reorder "First Last" as "Last, First".
NAMES = set()  # from http://names.mongabay.com/
for line in open('/home/reagle/joseph/2010/03/names-male.csv'):
    NAMES.add(line.split(' ')[0])
for line in open('/home/reagle/joseph/2010/03/names-female.csv'):
    NAMES.add(line.split(' ')[0])
NAMES_EXCEPTIONS = set(['LONG'])
NAMES.difference_update(NAMES_EXCEPTIONS)

# exceptions to the lists
KEEP_LOWER = ('danah', 'boyd')

def a_name(text):
    """Test if a common first name.

    >>> a_name("John")
    True
    """
    text = text.upper()
    if len(text) > 1 and text[1] == '.':  # e.g., H. G. Wells
        return True
    if text in NAMES:
        return True
    return False

def strip_var(v):
    """Strip a bit of text.

    >>> strip_var(' foo bar ')
    'foo bar'
    """
    if v:
        return v.strip()
    else:
        return None

def build_index(text):
    index = {}
    # The scraped source had lost the group names and backslash escapes in
    # this pattern; they are reconstructed here from the group lookups below.
    pattern_re = re.compile(
        r'(?P<topic>\D.+?) '
        r'(?:see (?P<see_ref>.*)|(?P<pages>[0-9,\-n]+(?!\.)) ?(?P<subtopic>.*))')
    for line in text:
        if line == '':
            continue
        if opts.debug:  # opts is presumably set via OptionParser in the (missing) main section
            print 'line =', line
        match = pattern_re.match(line)
        # The original unpacked groupdict().values(), whose order is not
        # guaranteed in Python 2.6; named lookups are reliable.
        topic, see_ref, pages, subtopic = (
            match.group('topic'), match.group('see_ref'),
            match.group('pages'), match.group('subtopic'))
        topic, see_ref, pages, subtopic = map(
            strip_var, (topic, see_ref, pages, subtopic))
        chunks = topic.split(' ')
        if len(chunks) > 1:
            if a_name(chunks[0]):
                # reorder "First [Middle] Last" as "Last, First [Middle]"
                topic = chunks[-1] + ', ' + ' '.join(chunks[0:-1])
        if topic not in index:
            index[topic] = {}
        if see_ref:
            if see_ref.startswith('also '):
                index[topic].setdefault('also', []).append(see_ref[5:])
            else:
                index[topic].setdefault('see', []).append(see_ref)
        elif subtopic:
            index[topic].setdefault('subtopics', {}).setdefault(
                subtopic, []).append(pages)
        else:
            index[topic].setdefault('pages', []).append(pages)
    return index

def entitle(s):
    """Title-case the first word of refs.

    >>> entitle('also monographic principle')
    'also monographic principle'
    >>> entitle('monographic principle')
    'Monographic principle'
    """
    new_refs = []
    if s.startswith(' see also '):
        # remove 'see' prefix text
        s = s[10:]
        prefix = '. *See also* '
    elif s.startswith(' see '):
        s = s[5:]
        prefix = '. *See* '
    else:
        prefix = ''
    refs = s.split('; ')
    for ref in refs:  # check each ref's words
        words = ref.split()
        if words[0] not in BORING_WORDS and words[0][0].islower():
            words[0] = words[0].title()
        words = ' '.join(words)
        new_refs.append(words)
    return prefix + '; '.join(sorted(new_refs))

# The source is cut off mid-pattern here; only '\d+[-' survives, so the
# completion below (a page range such as "12-15") is an assumption, and the
# remainder of the script is missing.
range_re = re.compile(u'\d+[-–]\d+')
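The entry pattern above expects lines of the form 'topic pages [subtopic]' or 'topic see reference'. Here is a quick standalone check of that pattern; the sample lines are invented for illustration and are not from the original script's data:

import re

pattern_re = re.compile(
    r'(?P<topic>\D.+?) '
    r'(?:see (?P<see_ref>.*)|(?P<pages>[0-9,\-n]+(?!\.)) ?(?P<subtopic>.*))')

for line in ('neutrality 12-14',
             'neutrality 22 origins of',
             'objectivity see neutrality'):
    m = pattern_re.match(line)
    print('%s | see=%s | pages=%s | sub=%s' % (
        m.group('topic'), m.group('see_ref'),
        m.group('pages'), m.group('subtopic')))

# Expected output:
# neutrality | see=None | pages=12-14 | sub=
# neutrality | see=None | pages=22 | sub=origins of
# objectivity | see=neutrality | pages=None | sub=None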