package edu.ucsb.nceas.metacat.admin.upgrade;
/**
 *  '$RCSfile$'
 *    Purpose: A Class for upgrading the database to version 1.5
 *  Copyright: 2000 Regents of the University of California and the
 *             National Center for Ecological Analysis and Synthesis
 *    Authors: Saurabh Garg
 *
 *   '$Author: leinfelder $'
 *     '$Date: 2011-03-29 18:23:38 +0000 (Tue, 29 Mar 2011) $'
 * '$Revision: 6025 $'
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import edu.ucsb.nceas.metacat.DBUtil;
import edu.ucsb.nceas.metacat.admin.AdminException;
import edu.ucsb.nceas.metacat.dataone.SystemMetadataFactory;
import edu.ucsb.nceas.metacat.dataone.hazelcast.HazelcastService;
import edu.ucsb.nceas.metacat.properties.PropertyService;
import edu.ucsb.nceas.utilities.SortedProperties;

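/**
 * Upgrade utility that generates DataONE system metadata for documents
 * already stored in Metacat, processing the docid list with a pool of
 * worker threads.
 */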
public class GenerateSystemMetadata implements UpgradeUtilityInterface {

    private static Log log = LogFactory.getLog(GenerateSystemMetadata.class);

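    /**
     * Kicks off the system metadata generation in a single background thread
     * so the admin (web) UI is not blocked while the upgrade runs.
     */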
    public boolean upgrade() throws AdminException {

        // run the generation in a separate thread so we do not hang the (web) UI
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Runnable command = new Runnable() {
            @Override
            public void run() {
                // just run it, propagating any failure
                try {
                    multiThreadUpgrade();
                } catch (AdminException e) {
                    throw new RuntimeException(e);
                }
            }
        };
        executor.execute(command);
        executor.shutdown();

        // optionally wait for the task to finish before returning
        boolean wait = false;
        if (wait) {
            log.debug("Waiting for upgrade to complete");
            try {
                executor.awaitTermination(Long.MAX_VALUE, TimeUnit.HOURS);
            } catch (InterruptedException e) {
                AdminException ae = new AdminException(e.getMessage());
                ae.initCause(e);
                throw ae;
            }
            log.debug("Done waiting for upgrade thread");
        }

        return true;
        //return singleThreadUpgrade();
    }

    /**
     * Use multiple threads to process parts of the complete ID list concurrently.
     * The sorted docid list is chunked by scope.docid prefix so that all revisions
     * of a document are handled by the same task.
     * @return true if system metadata generation completed successfully
     * @throws AdminException if the generation fails
     */
    public boolean multiThreadUpgrade() throws AdminException {

        boolean success = true;

        // do not include ORE or data, but can generate SystemMetadata for ALL records
        final boolean includeOre = false;
        final boolean downloadData = false;
        int serverLocation = -1;

        try {

            // get list of ALL docids at ALL server locations
            List<String> idList = DBUtil.getAllDocidsByType(null, true, serverLocation);
            Collections.sort(idList);

            // size the executor pool from the available processors
            int availableProcessors = Runtime.getRuntime().availableProcessors();
            int nThreads = availableProcessors * 1;
            //availableProcessors++;
            log.debug("Using nThreads: " + nThreads);

            ExecutorService executor = Executors.newFixedThreadPool(nThreads);
            int taskCount = 0;

            // make sure Hazelcast is initialized
            log.debug("Making sure Hazelcast is up");
            HazelcastService.getInstance();

            // chunk the sorted list into groups, keyed by scope.docid (without revision)
            int fromIndex = 0;
            int toIndex = 0;
            String prefix = null;
            for (String docid: idList) {

                // advance the exclusive end index of the current group
                toIndex++;

                // use scope.docid (without revision) to determine groups
                if (prefix == null || !docid.startsWith(prefix)) {

                    // construct a sublist for the previous group of docids
                    final List<String> subList = idList.subList(fromIndex, toIndex);
                    log.debug("Grouping docid prefix: " + prefix);
                    log.debug("subList.size: " + subList.size());

                    // add the task for this sublist
                    Runnable command = new Runnable() {
                        @Override
                        public void run() {
                            // generate system metadata for this sublist
                            try {
                                log.debug("Processing subList.size: " + subList.size());
                                SystemMetadataFactory.generateSystemMetadata(subList, includeOre, downloadData);
                                log.debug("Done processing subList.size: " + subList.size());

                            } catch (Exception e) {
                                throw new RuntimeException(e);
                            }
                        }
                    };

                    // execute the task
                    executor.execute(command);
                    taskCount++;

                    // start the next group at the end of this sublist
                    fromIndex = toIndex;

                }

                log.debug("docid: " + docid);

                // remember this docid's prefix (scope.docid without revision) for the next iteration
                prefix = docid.substring(0, docid.lastIndexOf("."));

            }

            // submit any docids remaining after the last prefix boundary,
            // which the loop above never turns into a task
            if (fromIndex < idList.size()) {
                final List<String> subList = idList.subList(fromIndex, idList.size());
                log.debug("Final subList.size: " + subList.size());
                Runnable command = new Runnable() {
                    @Override
                    public void run() {
                        try {
                            SystemMetadataFactory.generateSystemMetadata(subList, includeOre, downloadData);
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                };
                executor.execute(command);
                taskCount++;
            }

            log.info("done launching threaded tasks, count: " + taskCount);

            // wait for the executor to finish all tasks
            executor.shutdown();

            // wait a long time
            log.debug("Waiting for all threads to complete");
            executor.awaitTermination(Long.MAX_VALUE, TimeUnit.HOURS);
            log.debug("Done waiting for all threads to complete");

            // now we are ready to act as a DataONE node
            PropertyService.setProperty("dataone.systemmetadata.generated", Boolean.TRUE.toString());

        } catch (Exception e) {
            String msg = "Problem generating missing system metadata: " + e.getMessage();
            log.error(msg, e);
            success = false;
            AdminException ae = new AdminException(msg);
            ae.initCause(e);
            throw ae;
        }
        return success;
    }
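    /**
     * Allows the upgrade to be run standalone against a configured Metacat
     * deployment, loading properties from the test configuration.
     */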
    public static void main(String[] args) {

        try {
            // set up the properties based on the test/deployed configuration of the workspace
            SortedProperties testProperties = 
                new SortedProperties("test/test.properties");
            testProperties.load();
            String metacatContextDir = testProperties.getProperty("metacat.contextDir");
            PropertyService.getInstance(metacatContextDir + "/WEB-INF");
            // now run the upgrade
            GenerateSystemMetadata upgrader = new GenerateSystemMetadata();
            upgrader.upgrade();

        } catch (Exception ex) {
            System.out.println("Exception: " + ex.getMessage());
            ex.printStackTrace();
        }
    }
}