Revision 3219
Added by berkley about 17 years ago
DBQuery.java | ||
---|---|---|
33 | 33 |
import java.io.BufferedWriter; |
34 | 34 |
import java.io.File; |
35 | 35 |
import java.io.FileInputStream; |
36 |
import java.io.FileOutputStream; |
|
36 | 37 |
import java.io.FileReader; |
37 | 38 |
import java.io.FileWriter; |
38 | 39 |
import java.io.IOException; |
... | ... | |
40 | 41 |
import java.io.PrintWriter; |
41 | 42 |
import java.io.StringReader; |
42 | 43 |
import java.io.StringWriter; |
44 |
import java.io.OutputStream; |
|
43 | 45 |
import java.sql.PreparedStatement; |
44 | 46 |
import java.sql.ResultSet; |
45 | 47 |
import java.sql.SQLException; |
... | ... | |
56 | 58 |
|
57 | 59 |
import org.apache.log4j.Logger; |
58 | 60 |
|
61 |
import org.w3c.dom.*; |
|
62 |
import javax.xml.parsers.DocumentBuilderFactory; |
|
63 |
import org.xml.sax.InputSource; |
|
64 |
import org.w3c.dom.ls.*; |
|
65 |
|
|
59 | 66 |
import edu.ucsb.nceas.morpho.datapackage.Triple; |
60 | 67 |
import edu.ucsb.nceas.morpho.datapackage.TripleCollection; |
61 | 68 |
|
... | ... | |
323 | 330 |
DBTransform trans = new DBTransform(); |
324 | 331 |
response.setContentType("text/html"); |
325 | 332 |
|
326 |
// if the user is a moderator, then pass a param to the
|
|
333 |
// if the user is a moderator, then pass a param to the
|
|
327 | 334 |
// xsl specifying the fact |
328 | 335 |
if(MetaCatUtil.isModerator(user, groups)){ |
329 | 336 |
params.put("isModerator", new String[] {"true"}); |
... | ... | |
342 | 349 |
|
343 | 350 |
}//else |
344 | 351 |
|
352 |
} |
|
353 |
|
|
354 |
/** |
|
355 |
* this method parses the xml results in the string buffer and returns |
|
356 |
* just those required by the paging params. |
|
357 |
*/ |
|
358 |
private StringBuffer getPagedResult(MetacatResultSet mrs, int pagestart, |
|
359 |
int pagesize) |
|
360 |
{ |
|
361 |
logMetacat.warn(mrs.toString()); |
|
362 |
if(pagesize == 0) |
|
363 |
{ //if pagesize is 0 then we return the whole resultset |
|
364 |
return new StringBuffer(mrs.toString()); |
|
345 | 365 |
} |
366 |
|
|
367 |
return new StringBuffer(mrs.serializeToXML(pagestart, pagestart + pagesize)); |
|
368 |
} |
|
346 | 369 |
|
347 | 370 |
/* |
348 | 371 |
* Transforms a hashtable of documents to an xml or html result and sent |
... | ... | |
360 | 383 |
DBConnection dbconn = null; |
361 | 384 |
int serialNumber = -1; |
362 | 385 |
StringBuffer resultset = new StringBuffer(); |
386 |
|
|
387 |
//try to get the cached version first |
|
388 |
Hashtable sessionHash = MetaCatServlet.getSessionHash(); |
|
389 |
HttpSession sess = (HttpSession)sessionHash.get(sessionid); |
|
390 |
|
|
391 |
QuerySpecification cachedQuerySpec = (QuerySpecification)sess.getAttribute("query"); |
|
392 |
if(cachedQuerySpec != null && |
|
393 |
cachedQuerySpec.printSQL(false).equals(qspec.printSQL(false))) |
|
394 |
{ //use the cached resultset if the query was the same as the last |
|
395 |
MetacatResultSet mrs = (MetacatResultSet)sess.getAttribute("results"); |
|
396 |
logMetacat.info("Using cached query results"); |
|
397 |
//if the query is the same and the session contains the query |
|
398 |
//results, return those instead of rerunning the query |
|
399 |
if(mrs != null) |
|
400 |
{ //print and return the cached buffer |
|
401 |
StringBuffer pagedResultBuffer = getPagedResult(mrs, pagestart, |
|
402 |
pagesize); |
|
403 |
if(out != null) |
|
404 |
{ |
|
405 |
out.println("<?xml version=\"1.0\"?>\n"); |
|
406 |
out.println("<resultset>\n"); |
|
407 |
out.println(" <query>" + xmlquery + "</query>\n"); |
|
408 |
out.println(pagedResultBuffer.toString()); |
|
409 |
out.println("\n</resultset>\n"); |
|
410 |
} |
|
411 |
String returnString = "<?xml version=\"1.0\"?>\n"; |
|
412 |
returnString += "<resultset>\n"; |
|
413 |
returnString += " <query>" + xmlquery + "</query>\n"; |
|
414 |
returnString += pagedResultBuffer.toString(); |
|
415 |
returnString += "\n</resultset>\n"; |
|
416 |
return new StringBuffer(returnString); |
|
417 |
} |
|
418 |
} |
|
419 |
|
|
420 |
//no cached results...go on with a normal query |
|
421 |
|
|
363 | 422 |
resultset.append("<?xml version=\"1.0\"?>\n"); |
364 | 423 |
resultset.append("<resultset>\n"); |
365 | 424 |
resultset.append(" <query>" + xmlquery + "</query>"); |
366 |
// sent query part out
|
|
425 |
//send out a new query
|
|
367 | 426 |
if (out != null) |
368 | 427 |
{ |
369 | 428 |
out.println(resultset.toString()); |
... | ... | |
383 | 442 |
dbconn, useXMLIndex, pagesize, pagestart, |
384 | 443 |
sessionid); |
385 | 444 |
|
386 |
|
|
387 |
|
|
388 | 445 |
} //try |
389 | 446 |
catch (IOException ioe) |
390 | 447 |
{ |
... | ... | |
401 | 458 |
{ |
402 | 459 |
logMetacat.error("Exception in DBQuery.findDocuments: " |
403 | 460 |
+ ee.getMessage()); |
461 |
ee.printStackTrace(); |
|
404 | 462 |
} |
405 | 463 |
finally |
406 | 464 |
{ |
... | ... | |
414 | 472 |
out.println(closeRestultset); |
415 | 473 |
} |
416 | 474 |
|
475 |
//create a DOM to cache |
|
476 |
try |
|
477 |
{ |
|
478 |
|
|
479 |
//cache the query result and the query |
|
480 |
logMetacat.info("Caching query and resultset"); |
|
481 |
sess.setAttribute("query", qspec); |
|
482 |
MetacatResultSet mrs = processAndCacheResults(resultset.toString(), sess); |
|
483 |
sess.setAttribute("results", mrs); |
|
484 |
StringBuffer pagedResultBuffer = getPagedResult(mrs, pagestart, pagesize); |
|
485 |
String returnString = "<?xml version=\"1.0\"?>\n"; |
|
486 |
returnString += "<resultset>\n"; |
|
487 |
returnString += " <query>" + xmlquery + "</query>\n"; |
|
488 |
returnString += pagedResultBuffer.toString(); |
|
489 |
returnString += "\n</resultset>\n"; |
|
490 |
return new StringBuffer(returnString); |
|
491 |
} |
|
492 |
catch(Exception e) |
|
493 |
{ |
|
494 |
logMetacat.error("################Could not parse resultset: " + e.getMessage()); |
|
495 |
} |
|
496 |
|
|
417 | 497 |
return resultset; |
418 | 498 |
}//createResultDocuments |
419 | 499 |
|
500 |
/** |
|
501 |
* parse the dom of the resultset into a MetacatResultSet object so it can |
|
502 |
* be cached in a reasonable way |
|
503 |
*/ |
|
504 |
private MetacatResultSet processAndCacheResults(String resultset, HttpSession sess) |
|
505 |
throws Exception |
|
506 |
{ |
|
507 |
StringReader sreader = new StringReader(resultset.toString()); |
|
508 |
InputSource inputsource = new InputSource(sreader); |
|
509 |
logMetacat.warn("processing DOM"); |
|
510 |
Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(inputsource); |
|
511 |
//got the dom, now process it into an MRS |
|
512 |
MetacatResultSet mrs = new MetacatResultSet(doc); |
|
513 |
return mrs; |
|
514 |
} |
|
420 | 515 |
|
421 |
|
|
422 | 516 |
/* |
423 | 517 |
* Find the doc list which match the query |
424 | 518 |
*/ |
... | ... | |
430 | 524 |
int pagesize, int pagestart, String sessionid) |
431 | 525 |
throws Exception |
432 | 526 |
{ |
433 |
/* |
|
434 |
if pagesize != 0 then we need to process the query results in pages |
|
435 |
1) check to see what the sessionid is: look in MetacatServlet.getSessionHash() |
|
436 |
2) lookup the sessionid and the query in the paged_results table |
|
437 |
3) if there is already a page result for the session and query get that |
|
438 |
result and look at what our pagesize and pagestart is to get the next |
|
439 |
pagesize results |
|
440 |
4) if there is not a cached result, do the query, put the result in the |
|
441 |
cache under the correct sessionid and return 0..pagesize results |
|
442 |
5) when the session expires or is logged out, delete the cached queryresults |
|
527 |
String query = null; |
|
528 |
int count = 0; |
|
529 |
int index = 0; |
|
530 |
Hashtable docListResult = new Hashtable(); |
|
531 |
PreparedStatement pstmt = null; |
|
532 |
String docid = null; |
|
533 |
String docname = null; |
|
534 |
String doctype = null; |
|
535 |
String createDate = null; |
|
536 |
String updateDate = null; |
|
537 |
StringBuffer document = null; |
|
538 |
int rev = 0; |
|
539 |
double startTime = 0; |
|
540 |
int offset = 1; |
|
443 | 541 |
|
444 |
|
|
445 |
paged_results |
|
446 |
------------- |
|
447 |
sessionid (PK) (String) |
|
448 |
query (String) |
|
449 |
resultset (String) |
|
450 |
|
|
451 |
*/ |
|
452 |
Hashtable sessionHash = MetaCatServlet.getSessionHash(); |
|
453 |
HttpSession sess = (HttpSession)sessionHash.get(sessionid); |
|
454 |
//now we have the session object, so we can cache the query there. |
|
455 |
|
|
456 |
int offset = 1; |
|
542 |
ResultSet rs = null; |
|
543 |
|
|
544 |
offset = 1; |
|
457 | 545 |
// this is a hack for offset |
458 | 546 |
if (out == null) |
459 | 547 |
{ |
... | ... | |
467 | 555 |
(new Integer(MetaCatUtil.getOption("app_resultsetsize"))).intValue(); |
468 | 556 |
} |
469 | 557 |
|
470 |
int count = 0; |
|
471 |
int index = 0; |
|
472 |
Hashtable docListResult = new Hashtable(); |
|
473 |
PreparedStatement pstmt = null; |
|
474 |
String docid = null; |
|
475 |
String docname = null; |
|
476 |
String doctype = null; |
|
477 |
String createDate = null; |
|
478 |
String updateDate = null; |
|
479 |
StringBuffer document = null; |
|
480 |
int rev = 0; |
|
481 |
|
|
482 |
String query = null; |
|
483 |
|
|
484 | 558 |
/* |
485 | 559 |
* Check the docidOverride Vector |
486 | 560 |
* if defined, we bypass the qspec.printSQL() method |
... | ... | |
523 | 597 |
logMetacat.warn("\n\n\n final query: " + query); |
524 | 598 |
} |
525 | 599 |
|
526 |
double startTime = System.currentTimeMillis() / 1000;
|
|
600 |
startTime = System.currentTimeMillis() / 1000; |
|
527 | 601 |
pstmt = dbconn.prepareStatement(query); |
528 |
|
|
529 |
// Execute the SQL query using the JDBC connection |
|
530 |
pstmt.execute(); |
|
531 |
ResultSet rs = pstmt.getResultSet(); |
|
532 |
|
|
602 |
rs = pstmt.executeQuery(); |
|
603 |
//now we need to process the resultset based on pagesize and pagestart |
|
604 |
//if they are not 0 |
|
533 | 605 |
double queryExecuteTime = System.currentTimeMillis() / 1000; |
534 |
logMetacat.warn("Pagesize: " + pstmt.getFetchSize()); |
|
535 |
logMetacat.warn("Time for execute query: " |
|
606 |
logMetacat.warn("Time to execute query: " |
|
536 | 607 |
+ (queryExecuteTime - startTime)); |
537 | 608 |
boolean tableHasRows = rs.next(); |
538 | 609 |
while (tableHasRows) |
... | ... | |
549 | 620 |
Vector returndocVec = qspec.getReturnDocList(); |
550 | 621 |
if (returndocVec.size() != 0 && !returndocVec.contains(doctype) |
551 | 622 |
&& !qspec.isPercentageSearch()) |
552 |
{ |
|
623 |
{
|
|
553 | 624 |
logMetacat.warn("Back tracing now..."); |
554 | 625 |
String sep = MetaCatUtil.getOption("accNumSeparator"); |
555 | 626 |
StringBuffer btBuf = new StringBuffer(); |
... | ... | |
665 | 736 |
if (docname != null) |
666 | 737 |
{ |
667 | 738 |
document.append("<docname>" + docname + "</docname>"); |
668 |
}
|
|
669 |
if (doctype != null)
|
|
670 |
{
|
|
671 |
document.append("<doctype>" + doctype + "</doctype>");
|
|
672 |
}
|
|
673 |
if (createDate != null)
|
|
674 |
{
|
|
675 |
document.append("<createdate>" + createDate + "</createdate>");
|
|
676 |
}
|
|
677 |
if (updateDate != null)
|
|
678 |
{
|
|
679 |
document.append("<updatedate>" + updateDate + "</updatedate>");
|
|
680 |
}
|
|
681 |
// Store the document id and the root node id
|
|
682 |
docListResult.put(docid, (String) document.toString());
|
|
683 |
count++;
|
|
739 |
} |
|
740 |
if (doctype != null) |
|
741 |
{ |
|
742 |
document.append("<doctype>" + doctype + "</doctype>"); |
|
743 |
} |
|
744 |
if (createDate != null) |
|
745 |
{ |
|
746 |
document.append("<createdate>" + createDate + "</createdate>"); |
|
747 |
} |
|
748 |
if (updateDate != null) |
|
749 |
{ |
|
750 |
document.append("<updatedate>" + updateDate + "</updatedate>"); |
|
751 |
} |
|
752 |
// Store the document id and the root node id |
|
753 |
docListResult.put(docid, (String) document.toString()); |
|
754 |
count++; |
|
684 | 755 |
|
685 |
|
|
686 | 756 |
}//else |
687 | 757 |
// when doclist reached the offset number, send out doc list and empty |
688 | 758 |
// the hash table |
... | ... | |
711 | 781 |
logMetacat.warn("prepare docid list time: " |
712 | 782 |
+ (docListTime - queryExecuteTime)); |
713 | 783 |
|
714 |
|
|
715 | 784 |
return resultsetBuffer; |
716 | 785 |
}//findReturnDoclist |
717 | 786 |
|
Also available in: Unified diff
Got paging working. Metacat also now caches resultsets for users' sessions. The paging is still somewhat slow due to some XPath statements; that needs to be optimized next.