Revision 3219
Added by berkley over 17 years ago

Got paging working. Metacat also now caches resultsets for users' sessions. The paging is a bit slow due to some XPath statements; I need to get this optimized now.
src/edu/ucsb/nceas/metacat/MetaCatServlet.java

@@ -605,8 +605,6 @@
             name = (String) sess.getAttribute("name");
         }
 
-        logMetacat.info("&*&*&*&*&*&*&*&*&*&*&*&*&*&*&&*&*SESSIONID1: " + sess_id);
-
         //make user user username should be public
         if (username == null || (username.trim().equals(""))) {
             username = "public";
src/edu/ucsb/nceas/metacat/DBQuery.java

@@ -33,6 +33,7 @@
 import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileOutputStream;
 import java.io.FileReader;
 import java.io.FileWriter;
 import java.io.IOException;
@@ -40,6 +41,7 @@
 import java.io.PrintWriter;
 import java.io.StringReader;
 import java.io.StringWriter;
+import java.io.OutputStream;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
@@ -56,6 +58,11 @@
 
 import org.apache.log4j.Logger;
 
+import org.w3c.dom.*;
+import javax.xml.parsers.DocumentBuilderFactory;
+import org.xml.sax.InputSource;
+import org.w3c.dom.ls.*;
+
 import edu.ucsb.nceas.morpho.datapackage.Triple;
 import edu.ucsb.nceas.morpho.datapackage.TripleCollection;
 
@@ -323,7 +330,7 @@
         DBTransform trans = new DBTransform();
         response.setContentType("text/html");
 
-        // if the user is a moderator, then pass a param to the
+        // if the user is a moderator, then pass a param to the
         // xsl specifying the fact
         if(MetaCatUtil.isModerator(user, groups)){
             params.put("isModerator", new String[] {"true"});
@@ -342,7 +349,23 @@
 
         }//else
 
+    }
+
+    /**
+     * this method parses the xml results in the string buffer and returns
+     * just those required by the paging params.
+     */
+    private StringBuffer getPagedResult(MetacatResultSet mrs, int pagestart,
+                                        int pagesize)
+    {
+        logMetacat.warn(mrs.toString());
+        if(pagesize == 0)
+        { //if pagesize is 0 then we return the whole resultset
+            return new StringBuffer(mrs.toString());
         }
+
+        return new StringBuffer(mrs.serializeToXML(pagestart, pagestart + pagesize));
+    }
 
     /*
      * Transforms a hashtable of documents to an xml or html result and sent
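The paging rule getPagedResult() applies is worth spelling out: a page is the half-open range [pagestart, pagestart + pagesize), and pagesize == 0 means the entire resultset. Below is a minimal, self-contained sketch of that rule (hypothetical class and method names, not Metacat code); note that the committed serializeToXML() resets an out-of-range window to the last ten results, while this sketch simply clamps.

import java.util.ArrayList;
import java.util.List;

// Illustration only, not Metacat code: the windowing rule used by paging.
public class PagingSketch
{
    // Return one page of results: the half-open range
    // [pagestart, pagestart + pagesize), with pagesize == 0 meaning "all".
    static List<String> page(List<String> results, int pagestart, int pagesize)
    {
        if (pagesize == 0)
        {
            return results; // whole resultset, as in getPagedResult()
        }
        int end = Math.min(pagestart + pagesize, results.size());
        int start = Math.min(Math.max(pagestart, 0), end);
        return results.subList(start, end); // clamped, unlike serializeToXML()
    }

    public static void main(String[] args)
    {
        List<String> docs = new ArrayList<String>();
        for (int i = 1; i <= 25; i++)
        {
            docs.add("doc." + i);
        }
        System.out.println(page(docs, 10, 10)); // second page: doc.11 .. doc.20
    }
}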
@@ -360,10 +383,46 @@
         DBConnection dbconn = null;
         int serialNumber = -1;
         StringBuffer resultset = new StringBuffer();
+
+        //try to get the cached version first
+        Hashtable sessionHash = MetaCatServlet.getSessionHash();
+        HttpSession sess = (HttpSession)sessionHash.get(sessionid);
+
+        QuerySpecification cachedQuerySpec = (QuerySpecification)sess.getAttribute("query");
+        if(cachedQuerySpec != null &&
+           cachedQuerySpec.printSQL(false).equals(qspec.printSQL(false)))
+        { //use the cached resultset if the query was the same as the last
+            MetacatResultSet mrs = (MetacatResultSet)sess.getAttribute("results");
+            logMetacat.info("Using cached query results");
+            //if the query is the same and the session contains the query
+            //results, return those instead of rerunning the query
+            if(mrs != null)
+            { //print and return the cached buffer
+                StringBuffer pagedResultBuffer = getPagedResult(mrs, pagestart,
+                                                                pagesize);
+                if(out != null)
+                {
+                    out.println("<?xml version=\"1.0\"?>\n");
+                    out.println("<resultset>\n");
+                    out.println(" <query>" + xmlquery + "</query>\n");
+                    out.println(pagedResultBuffer.toString());
+                    out.println("\n</resultset>\n");
+                }
+                String returnString = "<?xml version=\"1.0\"?>\n";
+                returnString += "<resultset>\n";
+                returnString += " <query>" + xmlquery + "</query>\n";
+                returnString += pagedResultBuffer.toString();
+                returnString += "\n</resultset>\n";
+                return new StringBuffer(returnString);
+            }
+        }
+
+        //no cached results...go on with a normal query
+
         resultset.append("<?xml version=\"1.0\"?>\n");
         resultset.append("<resultset>\n");
         resultset.append(" <query>" + xmlquery + "</query>");
-        // sent query part out
+        //send out a new query
         if (out != null)
         {
             out.println(resultset.toString());
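The cache check above keys on the canonical SQL of the query: QuerySpecification.printSQL(false) for the incoming query is compared with that of the query object stored in the session, and the cached MetacatResultSet is reused only on an exact match. Reduced to its essentials, the pattern looks like the sketch below (hypothetical class and attribute names; HttpSession and its getAttribute/setAttribute are the standard servlet API).

import javax.servlet.http.HttpSession;

// Illustration only: a per-session cache keyed by the query's canonical SQL.
public class SessionQueryCache
{
    // Returns the cached resultset, or null if this query was not the last one run.
    public static Object lookup(HttpSession session, String canonicalSQL)
    {
        String cachedSQL = (String) session.getAttribute("query.sql"); // hypothetical key
        if (cachedSQL != null && cachedSQL.equals(canonicalSQL))
        {
            return session.getAttribute("query.results"); // hypothetical key
        }
        return null;
    }

    // Stores a fresh resultset under the canonical SQL of the query that produced it.
    public static void store(HttpSession session, String canonicalSQL, Object results)
    {
        session.setAttribute("query.sql", canonicalSQL);
        session.setAttribute("query.results", results);
    }
}

A session-scoped cache like this needs no explicit eviction beyond session expiry, which is also what the removed design comment (the paged_results table sketch, deleted further down in this revision) had planned to handle.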
@@ -383,8 +442,6 @@
                                      dbconn, useXMLIndex, pagesize, pagestart,
                                      sessionid);
 
-
-
         } //try
         catch (IOException ioe)
         {
@@ -401,6 +458,7 @@
         {
             logMetacat.error("Exception in DBQuery.findDocuments: "
                     + ee.getMessage());
+            ee.printStackTrace();
         }
         finally
         {
@@ -414,11 +472,47 @@
             out.println(closeRestultset);
         }
 
+        //create a DOM to cache
+        try
+        {
+
+            //cache the query result and the query
+            logMetacat.info("Caching query and resultset");
+            sess.setAttribute("query", qspec);
+            MetacatResultSet mrs = processAndCacheResults(resultset.toString(), sess);
+            sess.setAttribute("results", mrs);
+            StringBuffer pagedResultBuffer = getPagedResult(mrs, pagestart, pagesize);
+            String returnString = "<?xml version=\"1.0\"?>\n";
+            returnString += "<resultset>\n";
+            returnString += " <query>" + xmlquery + "</query>\n";
+            returnString += pagedResultBuffer.toString();
+            returnString += "\n</resultset>\n";
+            return new StringBuffer(returnString);
+        }
+        catch(Exception e)
+        {
+            logMetacat.error("################Could not parse resultset: " + e.getMessage());
+        }
+
         return resultset;
     }//createResultDocuments
 
+    /**
+     * parse the dom of the resultset into a MetacatResultSet object so it can
+     * be cached in a reasonable way
+     */
+    private MetacatResultSet processAndCacheResults(String resultset, HttpSession sess)
+        throws Exception
+    {
+        StringReader sreader = new StringReader(resultset.toString());
+        InputSource inputsource = new InputSource(sreader);
+        logMetacat.warn("processing DOM");
+        Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(inputsource);
+        //got the dom, now process it into an MRS
+        MetacatResultSet mrs = new MetacatResultSet(doc);
+        return mrs;
+    }
 
-
     /*
      * Find the doc list which match the query
      */
@@ -430,30 +524,24 @@
                                   int pagesize, int pagestart, String sessionid)
         throws Exception
     {
-        /*
-        if pagesize != 0 then we need to process the query results in pages
-        1) check to see what the sessionid is: look in MetacatServlet.getSessionHash()
-        2) lookup the sessionid and the query in the paged_results table
-        3) if there is already a page result for the session and query get that
-           result and look at what our pagesize and pagestart is to get the next
-           pagesize results
-        4) if there is not a cached result, do the query, put the result in the
-           cache under the correct sessionid and return 0..pagesize results
-        5) when the session expires or is logged out, delete the cached queryresults
+        String query = null;
+        int count = 0;
+        int index = 0;
+        Hashtable docListResult = new Hashtable();
+        PreparedStatement pstmt = null;
+        String docid = null;
+        String docname = null;
+        String doctype = null;
+        String createDate = null;
+        String updateDate = null;
+        StringBuffer document = null;
+        int rev = 0;
+        double startTime = 0;
+        int offset = 1;
 
-
-        paged_results
-        -------------
-        sessionid (PK) (String)
-        query (String)
-        resultset (String)
-
-        */
-        Hashtable sessionHash = MetaCatServlet.getSessionHash();
-        HttpSession sess = (HttpSession)sessionHash.get(sessionid);
-        //now we have the session object, so we can cache the query there.
-
-        int offset = 1;
+        ResultSet rs = null;
+
+        offset = 1;
         // this is a hack for offset
         if (out == null)
         {
@@ -467,20 +555,6 @@
                 (new Integer(MetaCatUtil.getOption("app_resultsetsize"))).intValue();
         }
 
-        int count = 0;
-        int index = 0;
-        Hashtable docListResult = new Hashtable();
-        PreparedStatement pstmt = null;
-        String docid = null;
-        String docname = null;
-        String doctype = null;
-        String createDate = null;
-        String updateDate = null;
-        StringBuffer document = null;
-        int rev = 0;
-
-        String query = null;
-
         /*
          * Check the docidOverride Vector
          * if defined, we bypass the qspec.printSQL() method
@@ -523,16 +597,13 @@
             logMetacat.warn("\n\n\n final query: " + query);
         }
 
-        double startTime = System.currentTimeMillis() / 1000;
+        startTime = System.currentTimeMillis() / 1000;
         pstmt = dbconn.prepareStatement(query);
-
-        // Execute the SQL query using the JDBC connection
-        pstmt.execute();
-        ResultSet rs = pstmt.getResultSet();
-
+        rs = pstmt.executeQuery();
+        //now we need to process the resultset based on pagesize and pagestart
+        //if they are not 0
         double queryExecuteTime = System.currentTimeMillis() / 1000;
-        logMetacat.warn("Pagesize: " + pstmt.getFetchSize());
-        logMetacat.warn("Time for execute query: "
+        logMetacat.warn("Time to execute query: "
                 + (queryExecuteTime - startTime));
         boolean tableHasRows = rs.next();
         while (tableHasRows)
@@ -549,7 +620,7 @@
                 Vector returndocVec = qspec.getReturnDocList();
                 if (returndocVec.size() != 0 && !returndocVec.contains(doctype)
                         && !qspec.isPercentageSearch())
-                {
+                {
                     logMetacat.warn("Back tracing now...");
                     String sep = MetaCatUtil.getOption("accNumSeparator");
                     StringBuffer btBuf = new StringBuffer();
@@ -665,24 +736,23 @@
                     if (docname != null)
                     {
                         document.append("<docname>" + docname + "</docname>");
-                    }
-                    if (doctype != null)
-                    {
-                        document.append("<doctype>" + doctype + "</doctype>");
-                    }
-                    if (createDate != null)
-                    {
-                        document.append("<createdate>" + createDate + "</createdate>");
-                    }
-                    if (updateDate != null)
-                    {
-                        document.append("<updatedate>" + updateDate + "</updatedate>");
-                    }
-                    // Store the document id and the root node id
-                    docListResult.put(docid, (String) document.toString());
-                    count++;
+                    }
+                    if (doctype != null)
+                    {
+                        document.append("<doctype>" + doctype + "</doctype>");
+                    }
+                    if (createDate != null)
+                    {
+                        document.append("<createdate>" + createDate + "</createdate>");
+                    }
+                    if (updateDate != null)
+                    {
+                        document.append("<updatedate>" + updateDate + "</updatedate>");
+                    }
+                    // Store the document id and the root node id
+                    docListResult.put(docid, (String) document.toString());
+                    count++;
 
-
                 }//else
                 // when doclist reached the offset number, send out doc list and empty
                 // the hash table
@@ -711,7 +781,6 @@
         logMetacat.warn("prepare docid list time: "
                 + (docListTime - queryExecuteTime));
 
-
        return resultsetBuffer;
     }//findReturnDoclist
 
src/edu/ucsb/nceas/metacat/MetacatResultSet.java

@@ -0,0 +1,200 @@
+/**
+ * '$RCSfile$'
+ * Purpose: A Class that searches a relational DB for elements and
+ *          attributes that have free text matches a query string,
+ *          or structured query matches to a path specified node in the
+ *          XML hierarchy. It returns a result set consisting of the
+ *          document ID for each document that satisfies the query
+ * Copyright: 2000 Regents of the University of California and the
+ *          National Center for Ecological Analysis and Synthesis
+ * Authors: Matt Jones
+ *
+ * '$Author$'
+ * '$Date$'
+ * '$Revision$'
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+package edu.ucsb.nceas.metacat;
+
+import java.io.*;
+import java.util.*;
+
+import org.w3c.dom.*;
+import org.apache.xpath.*;
+
+import org.apache.log4j.Logger;
+
+/**
+ * this class implements a metacat resultset and can be serialized to xml
+ * for printing to the servlet output stream.
+ */
+public class MetacatResultSet
+{
+    private Logger log = Logger.getLogger(MetacatResultSet.class);
+    private Vector results = new Vector();
+
+    /**
+     * default constructor
+     */
+    public MetacatResultSet()
+    {
+
+    }
+
+    /**
+     * constructor that can process a dom. this is very slow.
+     */
+    public MetacatResultSet(Document d)
+        throws Exception
+    {
+        log.warn("processing resultset...");
+        NodeList nl = XPathAPI.selectNodeList(d, "//document");
+        for(int i=0; i<nl.getLength(); i++)
+        { //get each of the document nodes
+            Node docNode = nl.item(i);
+            Node docidNode = XPathAPI.selectSingleNode(docNode, "./docid");
+            log.warn("processing " + docidNode.getFirstChild().getNodeValue());
+            Node docnameNode = XPathAPI.selectSingleNode(docNode, "./docname");
+            Node doctypeNode = XPathAPI.selectSingleNode(docNode, "./doctype");
+            Node createdateNode = XPathAPI.selectSingleNode(docNode, "./createdate");
+            Node updatedateNode = XPathAPI.selectSingleNode(docNode, "./updatedate");
+            //process the returnfields
+            NodeList returnfieldNL = XPathAPI.selectNodeList(docNode, "./param");
+            Hashtable returnfieldHash = new Hashtable();
+            for(int j=0; j<returnfieldNL.getLength(); j++)
+            {
+                Node returnfieldNode = returnfieldNL.item(j);
+                Node nameNode = XPathAPI.selectSingleNode(returnfieldNode, "@name");
+                String value = returnfieldNode.getFirstChild().getNodeValue();
+                String name = nameNode.getNodeValue();
+                returnfieldHash.put(name, value);
+            }
+
+            Result r = new Result(docidNode.getFirstChild().getNodeValue(),
+                    docnameNode.getFirstChild().getNodeValue(),
+                    doctypeNode.getFirstChild().getNodeValue(),
+                    createdateNode.getFirstChild().getNodeValue(),
+                    updatedateNode.getFirstChild().getNodeValue(),
+                    returnfieldHash);
+            addResult(r);
+        }
+    }
+
+    /**
+     * add a new result to the resultSet
+     */
+    public void addResult(Result r)
+    {
+        results.addElement(r);
+    }
+
+    /**
+     * returns a vector of the results
+     */
+    public Vector getResults()
+    {
+        return results;
+    }
+
+    /**
+     * serialize a selection of the results. This will print the results from
+     * start to end-1. if end is 0, nothing will be printed.
+     */
+    public String serializeToXML(int start, int end)
+    {
+        StringBuffer sb = new StringBuffer();
+        if(start > results.size() || end > results.size())
+        { //make sure we don't go over the edge of the vector
+            start = results.size() - 10;
+            end = results.size();
+        }
+
+        for(int i=start; i<end; i++)
+        {
+            Result r = (Result)results.elementAt(i);
+            sb.append(r.toString());
+            sb.append("\n");
+        }
+        return sb.toString();
+    }
+
+    /**
+     * returns an xml representation of this object
+     */
+    public String toString()
+    {
+        StringBuffer sb = new StringBuffer();
+        for(int i=0; i<results.size(); i++)
+        {
+            Result r = (Result)results.elementAt(i);
+            sb.append(r.toString());
+            sb.append("\n");
+        }
+        return sb.toString();
+    }
+
+
+    /**
+     * a class to store one result
+     */
+    public class Result
+    {
+        private String docid;
+        private String docname;
+        private String doctype;
+        private String createDate;
+        private String updateDate;
+        private Hashtable returnfields;
+
+        /**
+         * constructor
+         */
+        public Result(String docid, String docname, String doctype,
+                String createDate, String updateDate, Hashtable returnfields)
+        {
+            this.docid = docid;
+            this.doctype = doctype;
+            this.createDate = createDate;
+            this.updateDate = updateDate;
+            this.returnfields = returnfields;
+        }
+
+        /**
+         * returns serialized version of this result
+         */
+        public String toString()
+        {
+            StringBuffer sb = new StringBuffer();
+            sb.append("<document>\n");
+            sb.append(" <docid>" + docid + "</docid>\n");
+            sb.append(" <docname>" + docname + "</docname>\n");
+            sb.append(" <doctype>" + doctype + "</doctype>\n");
+            sb.append(" <createdate>" + createDate + "</createdate>\n");
+            sb.append(" <updatedate>" + updateDate + "</updatedate>\n");
+
+            Enumeration keys = returnfields.keys();
+            while(keys.hasMoreElements())
+            {
+                String key = (String)keys.nextElement();
+                String value = (String)returnfields.get(key);
+                sb.append(" <param name=\"" + key + "\">" + value + "</param>\n");
+            }
+            sb.append("</document>");
+            return sb.toString();
+        }
+    }
+}
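One bug worth flagging in the new class: Result's constructor never assigns this.docname, so every serialized <docname> element will read "null". A minimal follow-up patch (not part of this revision) would be:

         public Result(String docid, String docname, String doctype,
                 String createDate, String updateDate, Hashtable returnfields)
         {
             this.docid = docid;
+            this.docname = docname;
             this.doctype = doctype;

Note also that serializeToXML(start, end) prints results start through end-1, silently falls back to the last ten results when either bound exceeds the vector, and prints nothing when end is 0; callers must pass pagestart + pagesize as end, which is exactly what getPagedResult() does.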
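The XPath cost called out in the commit message is concentrated in MetacatResultSet(Document): each result costs five XPathAPI.selectSingleNode() calls plus one per returnfield, and every call evaluates an expression against the DOM. One possible direction, sketched below under assumptions (hypothetical class and method names; the resultset shape <document><docid/>...<param name="..."/></document> as serialized above), is a single pass over each document element's children with plain DOM traversal:

import java.util.Hashtable;

import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

// Illustration only, not Metacat code: one linear scan per <document> node
// instead of one XPath evaluation per field.
public class ResultSetScan
{
    // Collect the simple fields and returnfield params of one <document> element.
    static Hashtable scanDocumentNode(Element docElement)
    {
        Hashtable fields = new Hashtable();
        for (Node n = docElement.getFirstChild(); n != null; n = n.getNextSibling())
        {
            if (n.getNodeType() != Node.ELEMENT_NODE)
            {
                continue;
            }
            Element e = (Element) n;
            String text = (e.getFirstChild() == null)
                    ? "" : e.getFirstChild().getNodeValue();
            if ("param".equals(e.getTagName()))
            {
                fields.put("param:" + e.getAttribute("name"), text);
            }
            else
            {
                fields.put(e.getTagName(), text); // docid, docname, doctype, ...
            }
        }
        return fields;
    }

    // Usage: find the //document nodes once, then scan each one's children.
    static void scan(Document d)
    {
        NodeList docs = d.getElementsByTagName("document");
        for (int i = 0; i < docs.getLength(); i++)
        {
            System.out.println(scanDocumentNode((Element) docs.item(i)));
        }
    }
}

Collecting the fixed fields and the params in one walk turns the per-document cost from several XPath evaluations into a single linear scan of its children.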