Project

General

Profile

views.py

Luke Murphey, 11/21/2015 08:24 AM

Download (32.9 KB)

 
1
from django.shortcuts import get_object_or_404, render_to_response
2
from django.core import serializers
3
from django.core.urlresolvers import reverse
4
from django.http import HttpResponse, Http404
5
from django.core.servers.basehttp import FileWrapper
6
from django.template.context import RequestContext, Context
7
from django.template import loader
8
from django.views.decorators.cache import cache_page
9
from django.core.cache import cache
10
from django.template.defaultfilters import slugify
11
from django.conf import settings
12

    
13
import json
14
import logging
15
import math
16
import difflib
17
import re
18
import os
19

    
20
from reader.models import Work, WorkAlias, Division, Verse, Author, RelatedWork
21
from reader.language_tools.greek import Greek
22
from reader import language_tools
23
from reader.shortcuts import string_limiter, uniquefy, ajaxify, cache_page_if_ajax
24
from reader.utils import get_word_descriptions
25
from reader.contentsearch import search_verses
26
from reader.language_tools import normalize_unicode
27

    
28
# Try to import the ePubExport but be forgiving if the necessary dependencies do not exist.
# The None sentinels are checked later (e.g. in download_work) before any export is attempted.
try:
    from reader.ebook import ePubExport, MobiConvert
except ImportError:
    # Cannot import ePubExport and MobiConvert, this means we won't be able to make ebook files
    ePubExport = None
    MobiConvert = None
35

    
36
JSON_CONTENT_TYPE = "application/json" # Per RFC 4627: http://www.ietf.org/rfc/rfc4627.txt
37

    
38
# Get an instance of a logger
39
logger = logging.getLogger(__name__)
40

    
41
# These times are for making the caching decorators clearer
42
minutes = 60
43
hours   = 60 * minutes
44
days    = 24 * hours
45
months  = 30 * days
46
years   = 365.25 * days
47

    
48
@cache_page(8 * hours)
def home(request):
    """Render the home page along with counts of the Greek and English works."""

    context = {
        'greek_works_count':   Work.objects.filter(language="Greek").count(),
        'english_works_count': Work.objects.filter(language="English").count(),
    }

    return render_to_response('home.html', context,
                              context_instance=RequestContext(request))
57

    
58
@cache_page(8 * hours)
def about(request):
    """Render the static "about" page."""

    context = {'title': 'About TextCritical.net'}

    return render_to_response('about.html', context,
                              context_instance=RequestContext(request))
64

    
65
@cache_page(8 * hours)
def contact(request):
    """Render the static contact page."""

    context = {'title': 'Contact Us'}

    return render_to_response('contact.html', context,
                              context_instance=RequestContext(request))
71

    
72
def search(request, query=None):
    """
    Render the search page, pre-populated with any query and page number from
    the request.

    Arguments:
    request -- the HTTP request; 'q' and 'page' GET parameters are recognized
    query -- accepted for URL routing; always overwritten from the GET parameters
    """

    authors = Author.objects.all().order_by("name")
    works = Work.objects.all().order_by("title")

    # QueryDict.get returns None when the parameter is absent, which matches
    # the original's explicit membership checks
    query = request.GET.get('q')
    page = request.GET.get('page')

    return render_to_response('search.html',
                              {'title':   'Search',
                               'authors': authors,
                               'works':   works,
                               'query':   query,
                               'page':    page},
                              context_instance=RequestContext(request))
95

    
96
@cache_page(2 * hours)
def works_index(request):
    """Render the index of all works, optionally pre-filtered by the 'search' GET parameter."""

    # None when no 'search' parameter was supplied, as in the original
    search_filter = request.GET.get('search')

    return render_to_response('works_index.html',
                              {'title':  'Works',
                               'filter': search_filter},
                              context_instance=RequestContext(request))
108

    
109
def get_chapter_for_division(division):
    """
    Get the division that contains the next part of readable content at or
    after the given division's position in the work (None if there is none).
    """

    candidates = Division.objects.filter(
        work=division.work,
        readable_unit=True,
        sequence_number__gte=division.sequence_number).order_by("sequence_number")[:1]

    # Return the first match if one exists; fall through to an implicit None otherwise
    for candidate in candidates:
        return candidate
118

    
119
def get_chapters_list( division, count=9):
    """
    Get the list of readable divisions ("chapters") to show in the pagination
    control: a window of up to count entries centered on the given division.

    Arguments:
    division -- the division to center the window on
    count -- the maximum number of entries in the window
    """

    # Queryset slice limits must be integers: math.ceil/math.floor return
    # floats in Python 2, so convert explicitly
    pages_before = int(math.ceil( (count - 1.0) / 2 ))
    pages_after = int(math.floor( (count - 1.0) / 2 ))

    # Filter down the list to ones within the given work that are readable units
    divisions = Division.objects.filter(work=division.work, readable_unit=True)

    # Filter down the list to those in the same parent division
    if division.parent_division is not None:
        divisions = divisions.filter(parent_division=division.parent_division)

    # If no parent division was found, then filter the list down to the parent of the first division
    # so that we don't show entries for different divisions in the list
    else:

        # Try to get the first division
        first_division = divisions[:1]

        # If we got a first division, then filter on this one's parent
        if len(first_division) > 0 and first_division[0].parent_division is not None:
            divisions = divisions.filter(parent_division=first_division[0].parent_division)
        else:
            divisions = divisions.filter(parent_division=None)

    # Get the chapters at and before the current one (descending, so the
    # nearest come first), and the ones after it
    divisions_before = divisions.filter(sequence_number__lte=division.sequence_number).order_by("-sequence_number")[:pages_before]
    divisions_after = divisions.filter(sequence_number__gt=division.sequence_number).order_by("sequence_number")[:pages_after]

    # Stitch the two halves together in ascending sequence order
    final_list = []
    final_list.extend(divisions_before)
    final_list.reverse()
    final_list.extend(divisions_after)

    return final_list
158

    
159
def get_division_and_verse( work, division_0=None, division_1=None, division_2=None, division_3=None, division_4=None ):
    """
    Get the division associated with the given descriptor set. If the deepest
    provided descriptor is actually a verse indicator, return that indicator
    alongside the division found from the remaining descriptors.

    Arguments:
    work -- The work to get the division from
    division_0 -- The highest division to lookup
    division_1 -- The next highest division to lookup
    division_2 -- The next highest division to lookup
    division_3 -- The next highest division to lookup
    division_4 -- Can only be a verse indicator

    Returns a (division, verse_indicator) pair, or None if no descriptor was
    provided at all.
    """

    # First, assume every descriptor names a division
    division = get_division( work, division_0, division_1, division_2, division_3 )

    if division is not None:
        return division, division_4

    # Otherwise, treat the deepest provided descriptor as a verse indicator and
    # resolve the division from the descriptors above it
    descriptors = (division_0, division_1, division_2, division_3)

    for depth in (3, 2, 1, 0):
        if descriptors[depth] is not None:
            return get_division(work, *descriptors[:depth]), descriptors[depth]
190

    
191
def get_division( work, division_0=None, division_1=None, division_2=None, division_3=None ):
    """
    This function gets the division that is associated with the given descriptor set,
    or None if no matching division exists.

    Arguments:
    work -- The work to get the division from
    division_0 -- The highest division to lookup
    division_1 -- The next highest division to lookup
    division_2 -- The next highest division to lookup
    division_3 -- The next highest division to lookup
    """

    # Filter down the list to the division within the given work
    divisions = Division.objects.filter(work=work)

    # Get the division if we got four levels deep of descriptors ("1.2.3.4")
    # NOTE(review): the leaf lookup uses descriptor= (exact match) while the parent
    # levels use descriptor__iexact -- this looks inconsistent; confirm it is intended
    if division_0 is not None and division_1 is not None and division_2 is not None and division_3 is not None:

        divisions = divisions.filter(parent_division__parent_division__parent_division__parent_division=None,
                                     parent_division__parent_division__parent_division__descriptor__iexact=division_0,
                                     parent_division__parent_division__descriptor__iexact=division_1,
                                     parent_division__descriptor__iexact=division_2,
                                     descriptor=division_3)

    # Get the division if we got three levels deep of descriptors ("1.2.3")
    elif division_0 is not None and division_1 is not None and division_2 is not None:

        divisions = divisions.filter(parent_division__parent_division__parent_division=None,
                                     parent_division__parent_division__descriptor__iexact=division_0,
                                     parent_division__descriptor__iexact=division_1,
                                     descriptor=division_2)

    # Get the division if we got two levels deep of descriptors ("1.2")
    # NOTE(review): division_1 is tested for truthiness here (not "is not None") --
    # an empty-string descriptor falls through to the one-level case; confirm intended
    elif division_0 is not None and division_1:

        divisions = divisions.filter(parent_division__parent_division=None,
                                     parent_division__descriptor__iexact=division_0,
                                     descriptor=division_1)

    # Get the division if we got one level deep of descriptors ("1")
    elif division_0 is not None:
        divisions = divisions.filter(parent_division=None,
                                     descriptor__iexact=division_0)

    # Only grab one
    divisions = divisions[:1]

    if len(divisions) > 0:
        return divisions[0]
    else:
        return None # We couldn't find a matching division, perhaps one doesn't exist with the given set of descriptors?
243

    
244
def download_work(request, title=None,):
    """
    Stream an eBook (epub or mobi) rendition of the given work, generating the
    file on disk first if necessary.

    Arguments:
    request -- the HTTP request; recognized GET parameters:
               format  -- 'epub' or 'mobi' (anything else, or missing, is a 404)
               refresh -- if present and truthy, regenerate the file even if a
                          cached copy exists on disk
    title -- the title slug of the work (resolved through WorkAlias)

    Raises Http404 when the format is invalid, the work doesn't exist, or the
    eBook machinery is unavailable.
    """

    if 'refresh' in request.GET and request.GET['refresh'].strip().lower() in ["1", "true", "t", "", None]:
        use_cached = False
    else:
        use_cached = True

    # Get the format that the user is requesting. Default to an empty string so
    # that a missing parameter falls through to the clean 404 below; the
    # original only assigned book_format inside the 'if' and raised an
    # UnboundLocalError (HTTP 500) when 'format' was absent.
    book_format = request.GET.get('format', '')

    # Ensure the format is valid
    book_format = book_format.strip().lower()

    mime_types = { 'epub' : 'application/epub+zip',
                   'mobi' : 'application/x-mobipocket-ebook'
                 }

    if book_format not in mime_types:
        raise Http404('No eBook file found for the given format')

    # Try to get the work
    work_alias = get_object_or_404(WorkAlias, title_slug=title)
    work = work_alias.work

    # Get the filename of the eBook
    ebook_file = work.title_slug + "." + book_format
    ebook_file_full_path = os.path.join( settings.GENERATED_FILES_DIR, ebook_file)

    # If we aren't using the cached file (or it doesn't exist yet), then make it
    if not use_cached or not os.path.exists(ebook_file_full_path):

        # Make the epub. Note that we will need to make the epub even if we need to create a mobi file since mobi's are made from epub's
        if book_format == "mobi":
            epub_file_full_path = os.path.join( settings.GENERATED_FILES_DIR,  work.title_slug + ".epub" )
        else:
            epub_file_full_path = ebook_file_full_path

        # Stop if we don't have the ability to produce ebook files
        if ePubExport is None:
            raise Http404('eBook file not found')

        # Generate the ebook
        fname = ePubExport.exportWork(work, epub_file_full_path)

        logger.info("Created epub, filename=%s", fname)

        # If we need to make a mobi file, do it now
        if book_format == "mobi":

            # Stop if we don't have the ability to produce mobi files
            if MobiConvert is None:
                raise Http404('eBook file not found')

            # Generate the ebook
            fname = MobiConvert.convertEpub(work, epub_file_full_path, ebook_file_full_path)

            if os.path.exists(ebook_file_full_path):
                logger.info("Created mobi, filename=%s", fname)
            else:
                logger.info("Failed to create mobi, filename=%s", fname)
                raise Http404('eBook file not found')

    # Stream the file from the disk. Open in binary mode with open(); the
    # original used the Python 2-only file() builtin in text mode.
    wrapper = FileWrapper(open(ebook_file_full_path, 'rb'))

    response = HttpResponse(wrapper, content_type=mime_types[book_format])
    response['Content-Disposition'] = 'attachment; filename="%s"' % (ebook_file)
    response['Content-Length'] = os.path.getsize(ebook_file_full_path)
    return response
314

    
315
@cache_page_if_ajax(12 * months)
@ajaxify
def read_work(request, author=None, language=None, title=None, division_0=None, division_1=None, division_2=None, division_3=None, division_4=None, leftovers=None, **kwargs):
    """
    Render the reading view for a work at the requested place in the text.

    Arguments:
    request -- the HTTP request
    author, language -- accepted for URL routing; the lookup itself is by title
    title -- the title slug of the work (resolved through WorkAlias)
    division_0 .. division_3 -- division descriptors (the deepest one given may
                                actually be a verse indicator)
    division_4 -- the verse indicator, if the reference is that deep
    leftovers -- extra URL path components (set when the reference is deeper
                 than this work supports)

    Raises Http404 when the work or its first division cannot be found.
    """

    # Imported locally because these cache helpers are not among the
    # module-level imports; without this the function raised a NameError when
    # it reached the caching logic at the bottom.
    from django.utils.cache import add_never_cache_headers, patch_cache_control

    # Some warnings that should be posted to the user
    warnings = []

    # Try to get the work
    work_alias = get_object_or_404(WorkAlias, title_slug=title)
    work = work_alias.work

    # Get the chapter
    division, verse_to_highlight = get_division_and_verse(work, division_0, division_1, division_2, division_3, division_4)

    # Note a warning if we were unable to find the given chapter
    chapter_not_found = False

    if leftovers is not None:
        warnings.append( ("Section not found", "The place in the text you asked for could not be found (the reference you defined is too deep).") )
        chapter_not_found = True

    elif division is None and division_0 is not None:
        warnings.append( ("Section not found", "The place in the text you asked for could not be found.") )
        chapter_not_found = True

    # Start the user off at the beginning of the work
    if division is None:
        division = Division.objects.filter(work=work).order_by("sequence_number")[:1]

        if len(division) == 0:
            raise Http404('Division could not be found.')
        else:
            division = division[0]

    # Make sure the verse exists
    verse_not_found = False

    if chapter_not_found == False and verse_to_highlight is not None:
        if Verse.objects.filter(division=division, indicator=verse_to_highlight).count() == 0:
            warnings.append( ("Verse not found", "The verse you specified couldn't be found.") )
            verse_not_found = True

    # Get the readable unit
    chapter = get_chapter_for_division(division)

    # Get the verses to display
    verses = Verse.objects.filter(division=chapter).all()

    # Get the divisions that ought to be included in the table of contents
    divisions = Division.objects.filter(work=work, readable_unit=False)

    if len(divisions) == 0:
        divisions = None

    # Get the list of chapters for pagination
    chapters = get_chapters_list(chapter)

    # Get the amount of progress (based on chapters)
    total_chapters     = Division.objects.filter(work=division.work, readable_unit=True).count()
    completed_chapters = Division.objects.filter(work=division.work, readable_unit=True, sequence_number__lte=chapter.sequence_number).count()
    remaining_chapters = total_chapters - completed_chapters
    progress = ((1.0 * completed_chapters ) / total_chapters) * 100

    # Get the next and previous chapter number
    previous_chapter = Division.objects.filter(work=work, readable_unit=True, sequence_number__lt=chapter.sequence_number).order_by('-sequence_number')[:1]
    next_chapter = Division.objects.filter(work=work, readable_unit=True, sequence_number__gt=chapter.sequence_number).order_by('sequence_number')[:1]

    if len(previous_chapter) > 0:
        previous_chapter = previous_chapter[0]
    else:
        previous_chapter = None

    if len(next_chapter) > 0:
        next_chapter = next_chapter[0]
    else:
        next_chapter = None

    # Get related works
    related_works_tmp = RelatedWork.objects.filter(work=work)
    related_works = [r.related_work for r in related_works_tmp]

    # Make the page title: work title, then the chapter description, then the verse
    title = work.title

    # Add the chapter
    chapter_description = chapter.get_division_description()
    title = title + " " + chapter_description

    # Add the verse, using a "." separator when the chapter description already
    # contains one and ":" otherwise
    if verse_to_highlight:

        if chapter_description.find(".") >= 0:
            title = title + "."
        else:
            title = title + ":"

        title = title + verse_to_highlight

    response = render_to_response('read_work.html',
                                 {'title'                : title,
                                  'work_alias'           : work_alias,
                                  'warnings'             : warnings,
                                  'work'                 : work,
                                  'related_works'        : related_works,
                                  'verses'               : verses,
                                  'divisions'            : divisions,
                                  'chapter'              : chapter,
                                  'chapters'             : chapters,
                                  'authors'              : work.authors.filter(meta_author=False),
                                  'next_chapter'         : next_chapter,
                                  'previous_chapter'     : previous_chapter,
                                  'verse_to_highlight'   : verse_to_highlight,
                                  'total_chapters'       : total_chapters,
                                  'completed_chapters'   : completed_chapters,
                                  'remaining_chapters'   : remaining_chapters,
                                  'chapter_not_found'    : chapter_not_found,
                                  'verse_not_found'      : verse_not_found,
                                  'progress'             : progress},
                                  context_instance=RequestContext(request))

    # If the verse could not be found, set a response code to note that we couldn't get
    # the content that the user wanted so that caching doesn't take place.
    # 210 is deliberately nonstandard (not a cacheable success code).
    if verse_not_found:
        response.status_code = 210
        add_never_cache_headers(response)
    else:
        patch_cache_control(response, max_age=12 * months)

    return response
447

    
448
@cache_page(8 * hours)
def robots_txt(request):
    """Serve the robots.txt template."""
    return render_to_response('robots.txt', context_instance=RequestContext(request))
452

    
453
@cache_page(8 * hours)
def humans_txt(request):
    """Serve the humans.txt template."""
    return render_to_response('humans.txt', context_instance=RequestContext(request))
457

    
458
def not_found_404(request):
    """Render the custom 404 page with an explicit 404 status code."""

    rendered = loader.get_template('404.html').render(Context({'title': 'Not Found'}))

    return HttpResponse(content=rendered, content_type='text/html; charset=utf-8', status=404)
464

    
465
def tests(request):
    """Render the client-side test page (with the default CSS disabled)."""

    context = {
        'title': 'Tests',
        'include_default_css': 0,
    }

    return render_to_response('test.html', context,
                              context_instance=RequestContext(request))
470

    
471
@cache_page(8 * hours)
def beta_code_converter(request):
    """Render the beta-code to unicode converter page."""

    context = {'title': 'Beta-code Converter'}

    return render_to_response('beta_code_converter.html', context,
                              context_instance=RequestContext(request))
476

    
477
# -----------------------------------
478
# API views are defined below
479
# -----------------------------------
480
def render_api_response(request, content):
    """Serialize the given content to JSON and wrap it in an HTTP response."""

    # For XML, see: http://code.activestate.com/recipes/577268-python-data-structure-to-xml-serialization/
    return HttpResponse(json.dumps(content), content_type=JSON_CONTENT_TYPE)
486

    
487
def render_api_error(request, message, status=400):
    """Return a JSON error payload of the form {"message": ...} with the given HTTP status."""

    payload = json.dumps({ 'message' : message })

    return HttpResponse(payload, content_type=JSON_CONTENT_TYPE, status=status)
494

    
495
def render_queryset_api_response(request, content):
    """Serialize a Django queryset straight into a JSON HTTP response."""

    response = HttpResponse(content_type=JSON_CONTENT_TYPE)

    # Stream the serialized objects directly into the response body
    serializers.get_serializer("json")().serialize(content, stream=response, indent=2)

    return response
502

    
503
def api_index(request):
    """List the main API endpoints along with their resolved URL paths."""

    endpoint_names = ["api_index", "api_beta_code_to_unicode", "api_works_list"]

    urls = [ {"path": reverse(name), "name": name} for name in endpoint_names ]

    return render_api_response(request, urls)
515

    
516
def description_id_fun(x):
    """
    Provide the key string used to uniquefy WordDescription instances: the
    instance's string representation.

    Arguments:
    x -- a word description instance.
    """

    return str(x)
525

    
526
@cache_page(1 * hours)
def api_works_typehead_hints(request):
    """
    Produce typeahead hint entries: one per work (with a read URL), followed by
    one per unique author name (with an empty URL).
    """

    # Work titles first, each linked to its reading page
    hints = [ {'desc': work['title'],
               'url': reverse('read_work', args=[work['title_slug']])}
              for work in Work.objects.all().values('title', 'title_slug') ]

    # Then the de-duplicated author names, which have no landing URL
    hints.extend( {'desc': author, 'url': ''}
                  for author in uniquefy(Author.objects.all().values_list('name', flat=True)) )

    # Return the results
    return render_api_response(request, hints)
553

    
554
@cache_page(15 * minutes)
def api_search(request, search_text=None ):
    """
    Search the verses for the given text and return a JSON result set.

    Arguments:
    request -- the HTTP request; recognized GET parameters:
               q             -- the search query (used when search_text is empty)
               page          -- page number (defaults to 1 on absence or parse failure)
               pagelen       -- results per page (defaults to 10)
               related_forms -- 1/0 flag to include related word forms
    search_text -- the query text, when provided in the URL

    Returns a JSON object with result_count, page, page_len, and a results list;
    a 400 JSON error when no query was provided.
    """

    # Get the text to search for
    if search_text is not None and len(search_text) > 0:
        pass
    elif 'q' in request.GET:
        search_text = request.GET['q']
    else:
        return render_api_error(request, "No search query was provided", 400)

    # Normalize the query string
    search_text = language_tools.normalize_unicode(search_text)

    # Get the page number
    if 'page' in request.GET:
        try:
            page = int(request.GET['page'])
        except ValueError:
            page = 1
    else:
        page = 1

    # Get the page length
    if 'pagelen' in request.GET:
        try:
            pagelen = int(request.GET['pagelen'])
        except ValueError:
            pagelen = 10
    else:
        pagelen = 10

    # Determine if the related forms ought to be included
    if 'related_forms' in request.GET:
        try:
            include_related_forms = bool( int(request.GET['related_forms']) )
        except ValueError:
            include_related_forms = False
    else:
        include_related_forms = False

    # Perform the search
    search_results = search_verses( search_text, page=page, pagelen=pagelen, include_related_forms=include_related_forms )

    # This is where the results are stored
    results_lists = []

    # Prepare the results
    for result in search_results.verses:

        d = {}

        # Build the list of arguments necessary to make the URL
        args = [ result.verse.division.work.title_slug ]
        args.extend( result.verse.division.get_division_indicators() )

        # Determine if the last verse is a lone verse. If it is, then don't put the verse in the URL args.
        if Verse.objects.filter(division=result.verse.division).count() > 1:
            division_has_multiple_verses = True
            args.append( str(result.verse) )
        else:
            division_has_multiple_verses = False

        d['url']             = reverse('read_work', args=args )
        d['verse']           = str(result.verse)
        d['division']        = result.verse.division.get_division_description()
        d['work_title_slug'] = result.verse.division.work.title_slug
        d['work']            = result.verse.division.work.title
        d['highlights']      = result.highlights
        d['content_snippet'] = string_limiter(result.verse.content, 80)

        # If the verse is not a lone verse (has other verses next to it under the parent division)
        # then add the verse information to the description, matching the separator style
        # used elsewhere ("." when the division description already contains one, ":" otherwise)
        if division_has_multiple_verses:

            if '.' in d['division']:
                d['description'] = d['division'] + "." + d['verse']
            else:
                d['description'] = d['division'] + ":" + d['verse']

        # If the verse is a lone verse, don't bother adding it
        else:
            d['description'] = d['division']

        # Append the results
        results_lists.append(d)

    results_set = {
                   'result_count' : search_results.result_count,
                   'page' : search_results.page,
                   'page_len' : search_results.pagelen,
                   'results'  : results_lists
                   }

    # Return the results
    return render_api_response(request, results_set)
650

    
651
@cache_page(15 * minutes)
def api_convert_query_beta_code(request, search_query):
    """
    Convert the beta-code portions of a search query into unicode Greek,
    leaving field prefixes and non-beta-code values untouched.

    Arguments:
    request -- the HTTP request; the query may also be given as the 'q' GET parameter
    search_query -- the query string from the URL

    Returns the converted query as a JSON response, or a 400 JSON error when no
    query was provided.
    """

    # Fall back to the GET parameter when no query was given in the URL.
    # Note: the original unparenthesized condition parsed as
    # "None or (empty and 'q' present)" due to operator precedence, so a None
    # query with no 'q' parameter crashed on the split below.
    if (search_query is None or len(search_query) == 0) and 'q' in request.GET:
        search_query = request.GET['q']

    # Stop cleanly if we still have nothing to convert
    if search_query is None:
        return render_api_error(request, "No query was provided", 400)

    # Break up the query into individual search strings
    queries = search_query.split(" ")

    # Fields whose values may contain beta-code (None covers bare, prefix-less terms)
    beta_fields = ["content", "no_diacritics", "section", None]

    # This will be the new search string
    new_queries = []

    for q in queries:

        # By default, assume that the query term is unchanged
        new_q = q

        # If the term has a field prefix ("field:value"), then separate it
        if ":" in q:
            field, value = q.split(":", 1)

        # If the term has no field, then set the field to none
        else:
            field = None
            value = q

        # If the field is one that can contain beta-code, then convert the value
        if field in beta_fields:

            # Re-add the field prefix if one was present
            if field is not None:
                new_q = field + ":"
            else:
                new_q = ""

            # Convert word-like (beta-code) values; leave others alone since
            # they may already be Greek
            if re.match(r"\w+", value ):
                new_q = new_q + Greek.beta_code_to_unicode(value)
            else:
                new_q = new_q + value

        # Add the term to the list
        new_queries.append(new_q)

    return render_api_response(request, " ".join(new_queries) )
702

    
703
@cache_page(15 * minutes)
def api_word_parse(request, word=None):
    """
    Parse the given Greek word and return its possible morphological
    descriptions, sorted so that the lemmas most similar to the word come first.

    Arguments:
    request -- the HTTP request; the word may also be given as the 'word' GET parameter
    word -- the word to parse

    Returns a JSON list of parse entries, or a 400 JSON error when no word was
    provided.
    """

    # Fall back to the GET parameter when no word was given in the URL.
    # Note: the original condition ("word is None or len(word) == 0 and
    # 'word' in request.GET") parsed as "None or (empty and present)" and
    # raised a KeyError when word was None with no 'word' parameter.
    if not word and 'word' in request.GET:
        word = request.GET['word']

    # Stop cleanly if we still have no word to parse
    if not word:
        return render_api_error(request, "No word was provided", 400)

    # The accent-free form used for similarity scoring below
    word_basic_form = language_tools.strip_accents( normalize_unicode(word) )

    # Do a search for the parse
    ignoring_diacritics = False
    ignoring_numerals = False
    descriptions = get_word_descriptions( word, False )

    # If we couldn't find the word, then try again ignoring diacritical marks
    if len(descriptions) == 0:
        ignoring_diacritics = True
        descriptions = get_word_descriptions( word, True )

    # If we couldn't find the word and it has numbers (indicating a particular parse), then remove the numbers and try again
    if len(descriptions) == 0 and re.search("[0-9]", word) is not None:

        # Strip the numbers
        stripped_word = normalize_unicode(re.sub("[0-9]", "", word))

        ignoring_numerals = True

        # Try without ignoring diacritics
        ignoring_diacritics = False
        descriptions = get_word_descriptions( stripped_word, False )

        # Try with ignoring diacritics
        if len(descriptions) == 0:
            ignoring_diacritics = True
            descriptions = get_word_descriptions( stripped_word, True )

    # Make the final result to be returned
    results = []

    for d in descriptions:

        entry = {}

        entry["meaning"] = d.meaning
        entry["description"] = str(d)
        entry["ignoring_numerals"] = ignoring_numerals
        entry["ignoring_diacritics"] = ignoring_diacritics
        entry["form"] = d.word_form.form

        if d.lemma:
            entry["lemma"] = d.lemma.lexical_form
        else:
            entry["lemma"] = None

        # Calculate the similarity so that we can sort the results by it.
        # Guard against a None lemma, which would crash SequenceMatcher.ratio().
        entry["similarity"] = int(round(difflib.SequenceMatcher(None, entry["lemma"] or "", word_basic_form).ratio() * 100, 0))

        results.append(entry)

    # Sort the entries by descending similarity. Using key=/reverse= instead of
    # the Python 2-only cmp= argument; both are stable, so ties keep their order.
    results = sorted(results, key=lambda entry: entry["similarity"], reverse=True)

    # Return the response
    return render_api_response(request, results)
769

    
770
@cache_page(15 * minutes)
def api_word_parse_beta_code(request, word=None):
    """
    Parse a Greek word supplied in beta-code: convert it to unicode and
    delegate to api_word_parse.

    The word may come from the URL (the `word` argument) or, failing that,
    from the 'word' GET parameter.
    """
    # Fall back to the GET parameter only when it actually exists.
    # The parentheses are the fix: the original unparenthesized condition
    # parsed as `word is None or (len(word) == 0 and 'word' in request.GET)`,
    # so a None word with no 'word' parameter still entered the branch and
    # the request.GET['word'] lookup blew up.
    if (word is None or len(word) == 0) and 'word' in request.GET:
        word = request.GET['word']

    return api_word_parse(request, Greek.beta_code_to_unicode(word))
777

    
778
@cache_page(15 * minutes)
def api_unicode_to_betacode(request, text=None):
    """
    Convert unicode Greek text to beta-code.

    The text may come from the URL (the `text` argument) or, failing that,
    from the 'text' GET parameter. Returns both representations in the
    API response.
    """
    # Fall back to the GET parameter only when it actually exists.
    # Parenthesized to fix the precedence bug: the original condition parsed
    # as `text is None or (len(text) == 0 and 'text' in request.GET)`, so a
    # None text with no 'text' parameter still triggered the (failing)
    # request.GET['text'] lookup.
    if (text is None or len(text) == 0) and 'text' in request.GET:
        text = request.GET['text']

    d = {
        'unicode': text,
        'beta-code': Greek.unicode_to_beta_code(text),
    }

    return render_api_response(request, d)
790

    
791
@cache_page(15 * minutes)
def api_beta_code_to_unicode(request, text=None):
    """
    Convert beta-code Greek text to unicode.

    The text may come from the URL (the `text` argument) or, failing that,
    from the 'text' GET parameter. Returns both representations in the
    API response.
    """
    # Fall back to the GET parameter only when it actually exists.
    # Parenthesized to fix the precedence bug: the original condition parsed
    # as `text is None or (len(text) == 0 and 'text' in request.GET)`, so a
    # None text with no 'text' parameter still triggered the (failing)
    # request.GET['text'] lookup.
    if (text is None or len(text) == 0) and 'text' in request.GET:
        text = request.GET['text']

    d = {
        'unicode': Greek.beta_code_to_unicode(text),
        'beta-code': text,
    }

    return render_api_response(request, d)
803

    
804
@cache_page(15 * minutes)
def api_works_list_for_author(request, author):
    """Expose the works list restricted to a single author (URL-routed wrapper)."""
    return api_works_list(request, author=author)
807

    
808
@cache_page(4 * hours)
def api_works_list(request, author=None):
    """
    Return the list of works as an API response, optionally restricted to
    the works of a single author.
    """
    # Narrow the queryset to the requested author when one was given.
    if author is None:
        works = Work.objects.all()
    else:
        works = Work.objects.filter(authors__name=author)

    # Order by title so the response is consistent between calls, and
    # prefetch the related people to avoid per-work queries.
    works = works.order_by("title").prefetch_related('authors', 'editors')

    # Build the JSON-serializable payload.
    works_json = [
        {
            'title': work.title,
            'title_slug': work.title_slug,
            'language': work.language,
            'author': ", ".join(work.authors.values_list('name', flat=True)),
            'editor': ", ".join(work.editors.values_list('name', flat=True)),
        }
        for work in works
    ]

    return render_api_response(request, works_json)
834

    
835
def assign_divisions(ref_components):
    """
    Spread parsed reference components across the five division slots.

    Returns a 5-tuple (division_0, ..., division_4); missing trailing
    components come back as None and anything past the fifth component is
    discarded.
    """
    # Keep at most five components, then right-pad with None up to five.
    slots = list(ref_components[:5])
    slots.extend([None] * (5 - len(slots)))
    return tuple(slots)
851

    
852
def swap_slugs(divisions_with_spaces, *args):
    """
    Undo slugification of division descriptors inside each argument.

    For every non-None argument, each slugified form of a descriptor from
    divisions_with_spaces is replaced with the original descriptor. None
    arguments pass through unchanged. Returns the arguments as a list, in
    the order given.
    """
    def restore(value):
        # None stays None; otherwise swap each slug back to its descriptor.
        if value is None:
            return None
        for division in divisions_with_spaces:
            descriptor = division['descriptor']
            value = value.replace(slugify(descriptor), descriptor)
        return value

    return [restore(value) for value in args]
864

    
865
def parse_reference_and_get_division_and_verse(regex, escaped_ref, work, divisions_with_spaces):
    """
    Parse an escaped reference string and resolve it against a work.

    The reference is split on the separator pattern `regex`, the pieces are
    spread across the five division slots, slugified descriptors are swapped
    back to their originals, and the result is resolved to a division and
    verse. Returns a 7-tuple:
    (division, verse_to_highlight, division_0, ..., division_4).
    """
    # Split the reference into components and fill the five division slots.
    divisions = assign_divisions(re.split(regex, escaped_ref))

    # Restore descriptors whose embedded spaces were protected by slugs.
    divisions = swap_slugs(divisions_with_spaces, *divisions)

    # Resolve the parsed components to a concrete division and verse.
    division, verse_to_highlight = get_division_and_verse(work, *divisions)

    return (division, verse_to_highlight) + tuple(divisions)
877

    
878
def api_resolve_reference(request, work=None, ref=None):
    """
    Resolve a textual reference (e.g. "1 Samuel 3:2") within a work to a
    reader URL, returning the URL and the verse to highlight.

    Both the work slug and the reference may come from the URL arguments or
    from the request parameters ('work' and 'ref').
    """
    # Pull the work and reference from the request parameters if they were
    # not provided as arguments.
    if work is None and 'work' in request.REQUEST:
        work = request.REQUEST['work']

    if ref is None and 'ref' in request.REQUEST:
        ref = request.REQUEST['ref']

    # Resolve the slug to the actual work (404 if unknown).
    work_alias = get_object_or_404(WorkAlias, title_slug=work)
    actual_work = work_alias.work

    # Division descriptors containing spaces would be mangled by the
    # separator split below, so note them now.
    divisions_with_spaces = Division.objects.filter(
        work=actual_work, descriptor__contains=' ').values('descriptor')

    # Temporarily replace each spaced descriptor with its slug; swap_slugs
    # restores them after the reference has been split apart.
    escaped_ref = ref + ''
    for division in divisions_with_spaces:
        escaped_ref = escaped_ref.replace(
            division['descriptor'], slugify(division['descriptor']))

    # First attempt: split on spaces, periods and colons.
    division, verse_to_highlight, division_0, division_1, division_2, division_3, division_4 = \
        parse_reference_and_get_division_and_verse(
            '[ .:]+', escaped_ref, actual_work, divisions_with_spaces)

    # Second attempt: some references use periods inside division names, so
    # retry without treating the period as a separator.
    if division is None and division_0 is not None:
        division, verse_to_highlight, division_0, division_1, division_2, division_3, division_4 = \
            parse_reference_and_get_division_and_verse(
                '[ :]+', escaped_ref, actual_work, divisions_with_spaces)

    # Build the URL arguments: the work slug followed by the non-empty
    # division components.
    url_args = [actual_work.title_slug]
    url_args.extend(
        component
        for component in (division_0, division_1, division_2, division_3, division_4)
        if component is not None)

    return render_api_response(request, {
        'url': reverse('read_work', args=url_args),
        'verse_to_highlight': verse_to_highlight,
    })