Revision 6876
Added by Chris Jones almost 13 years ago
src/edu/ucsb/nceas/metacat/dataone/CNodeService.java

@@ -256,56 +256,32 @@
             replicas = systemMetadata.getReplicaList();
             int count = 0;
 
-            if ( replicas == null || replicas.size() < 1 ) {
-                logMetacat.debug("No replicas to evaluate for " + pid.getValue());
-                Replica newReplica = new Replica();
-                newReplica.setReplicaMemberNode(targetNode);
-                newReplica.setReplicationStatus(status);
-                newReplica.setReplicaVerified(Calendar.getInstance().getTime());
-                try {
-                    // if there is no replica entry, create one
-                    updateReplicationMetadata(session, pid, newReplica,
-                        systemMetadata.getSerialVersion().longValue());
-
-                } catch (VersionMismatch e) {
-                    // try again if we somehow don't have the correct version
-                    String msg = "The serial version of the system metadata doesn't match. Trying again.";
-                    logMetacat.info(msg);
-                    systemMetadata = HazelcastService.getInstance().getSystemMetadataMap().get(pid);
-                    try {
-                        updateReplicationMetadata(session, pid, newReplica,
-                            systemMetadata.getSerialVersion().longValue());
-
-                    } catch (VersionMismatch e1) {
-                        throw new ServiceFailure("4700",
-                            "Couldn't get the correct serial version of the system metadata: " +
-                            e1.getCause().getMessage());
-
-                    }
-
-                }
-
+            // was there a failure? log it
+            if ( failure != null && status == ReplicationStatus.FAILED ) {
+                String msg = "The replication request of the object identified by " +
+                    pid.getValue() + " failed. The error message was " +
+                    failure.getMessage() + ".";
             }
 
-            // refresh the system metadata and replica list
-            systemMetadata = HazelcastService.getInstance().getSystemMetadataMap().get(pid);
-            replicas = systemMetadata.getReplicaList();
+            if (replicas.size() > 0 && replicas != null) {
+                // find the target replica index in the replica list
+                for (Replica replica : replicas) {
+                    String replicaNodeStr = replica.getReplicaMemberNode()
+                            .getValue();
+                    String targetNodeStr = targetNode.getValue();
+                    logMetacat.debug("Comparing " + replicaNodeStr + " to "
+                            + targetNodeStr);
 
-            // find the target replica index in the replica list
-            for (Replica replica: replicas) {
-                String replicaNodeStr = replica.getReplicaMemberNode().getValue();
-                String targetNodeStr = targetNode.getValue();
-                logMetacat.debug("Comparing " + replicaNodeStr + " to " + targetNodeStr);
+                    if (replicaNodeStr.equals(targetNodeStr)) {
+                        replicaEntryIndex = count;
+                        logMetacat.debug("replica entry index is: "
+                                + replicaEntryIndex);
+                        break;
+                    }
+                    count++;
 
-                if (replicaNodeStr.equals(targetNodeStr)) {
-                    replicaEntryIndex = count;
-                    logMetacat.debug("replica entry index is: " + replicaEntryIndex);
-                    break;
                 }
-                count++;
-
             }
-
             // are we allowed to do this? only CNs and target MNs are allowed
             CNode cn = D1Client.getCN();
             List<Node> nodes = cn.listNodes().getNodeList();

@@ -332,39 +308,42 @@
                 }
             }
 
-            if ( !allowed && !isAdminAuthorized(session, pid, Permission.WRITE)) {
-                String msg = "The subject identified by " + subject.getValue() +
-                    " does not have permission to set the replication status for " +
-                    "the replica identified by " + targetNode.getValue() + ".";
-                logMetacat.info(msg);
-                throw new NotAuthorized("4720", msg);
+            if ( !isAdminAuthorized(session, pid, Permission.WRITE) ) {
+                if (!allowed) {
+                    String msg = "The subject identified by "
+                        + subject.getValue()
+                        + " does not have permission to set the replication status for "
+                        + "the replica identified by "
+                        + targetNode.getValue() + ".";
+                    logMetacat.info(msg);
+                    throw new NotAuthorized("4720", msg);
+                }
 
             }
 
-            // was there a failure? log it
-            if ( failure != null && status == ReplicationStatus.FAILED ) {
-                String msg = "The replication request of the object identified by " +
-                    pid.getValue() + " failed. The error message was " +
-                    failure.getMessage() + ".";
-            }
-
+
         } catch (RuntimeException e) { // Catch is generic since HZ throws RuntimeException
             throw new NotFound("4740", "No record found for: " + pid.getValue() +
                 " : " + e.getMessage());
 
         }
 
+        Replica targetReplica = new Replica();
         // set the status for the replica
        if ( replicaEntryIndex != -1 ) {
-            Replica targetReplica = replicas.get(replicaEntryIndex);
+            targetReplica = replicas.get(replicaEntryIndex);
            targetReplica.setReplicationStatus(status);
            logMetacat.debug("Set the replication status for " +
                targetReplica.getReplicaMemberNode().getValue() + " to " +
                targetReplica.getReplicationStatus());
 
        } else {
-            throw new InvalidRequest("4730", "There are no replicas to update.");
-
+            // this is a new entry, create it
+            targetReplica.setReplicaMemberNode(targetNode);
+            targetReplica.setReplicationStatus(status);
+            targetReplica.setReplicaVerified(Calendar.getInstance().getTime());
+            replicas.add(targetReplica);
+
        }
 
        systemMetadata.setReplicaList(replicas);
Simplify setReplicationStatus() to not call updateReplicationMetadata() if a replica doesn't exist. Just create it and update the system metadata, which we already have a lock for.
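For context, the following is a condensed sketch of what the simplified update path amounts to. It is paraphrased from the added lines in the diff above, not a verbatim excerpt of CNodeService: the helper method name is hypothetical, and the org.dataone.service.types.v1 package in the imports is an assumption about where the DataONE Replica, NodeReference, ReplicationStatus, and SystemMetadata types live.

import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;

import org.dataone.service.types.v1.NodeReference;
import org.dataone.service.types.v1.Replica;
import org.dataone.service.types.v1.ReplicationStatus;
import org.dataone.service.types.v1.SystemMetadata;

public class ReplicaStatusSketch {

    // Hypothetical helper: apply a replication status to the replica list of
    // system metadata the caller already holds a lock on. Mirrors the logic
    // added in this revision; it is not the actual CNodeService code.
    static void setStatusOnLockedSystemMetadata(SystemMetadata systemMetadata,
            NodeReference targetNode, ReplicationStatus status) {

        List<Replica> replicas = systemMetadata.getReplicaList();
        if (replicas == null) {
            replicas = new ArrayList<Replica>();
        }

        // find the existing entry for the target node, if there is one
        Replica targetReplica = null;
        for (Replica replica : replicas) {
            if (replica.getReplicaMemberNode().getValue()
                    .equals(targetNode.getValue())) {
                targetReplica = replica;
                break;
            }
        }

        if (targetReplica != null) {
            // existing entry: just record the new status
            targetReplica.setReplicationStatus(status);
        } else {
            // no entry yet: create it in place instead of calling
            // updateReplicationMetadata() and retrying on VersionMismatch
            targetReplica = new Replica();
            targetReplica.setReplicaMemberNode(targetNode);
            targetReplica.setReplicationStatus(status);
            targetReplica.setReplicaVerified(Calendar.getInstance().getTime());
            replicas.add(targetReplica);
        }

        // write the (possibly extended) list back onto the locked entry
        systemMetadata.setReplicaList(replicas);
    }
}

Because the caller already holds the lock on the Hazelcast system metadata entry, the missing-replica case can be handled by adding the entry directly to the list, which removes the old serial-version round trip and its VersionMismatch retry.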