001 /**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements. See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License. You may obtain a copy of the License at
008 *
009 * http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017 package org.apache.kahadb.journal;
018
019 import java.io.File;
020 import java.io.FilenameFilter;
021 import java.io.IOException;
022 import java.io.UnsupportedEncodingException;
023 import java.util.ArrayList;
024 import java.util.Collections;
025 import java.util.HashMap;
026 import java.util.Iterator;
027 import java.util.LinkedHashMap;
028 import java.util.List;
029 import java.util.Map;
030 import java.util.Set;
031 import java.util.Timer;
032 import java.util.TimerTask;
033 import java.util.TreeMap;
034 import java.util.concurrent.ConcurrentHashMap;
035 import java.util.concurrent.atomic.AtomicLong;
036 import java.util.concurrent.atomic.AtomicReference;
037 import java.util.zip.Adler32;
038 import java.util.zip.Checksum;
039 import org.apache.commons.logging.Log;
040 import org.apache.commons.logging.LogFactory;
041 import org.apache.kahadb.journal.DataFileAppender.WriteCommand;
042 import org.apache.kahadb.journal.DataFileAppender.WriteKey;
043 import org.apache.kahadb.util.ByteSequence;
044 import org.apache.kahadb.util.DataByteArrayInputStream;
045 import org.apache.kahadb.util.DataByteArrayOutputStream;
046 import org.apache.kahadb.util.LinkedNodeList;
047 import org.apache.kahadb.util.SchedulerTimerTask;
048 import org.apache.kahadb.util.Sequence;
049
050 /**
051 * Manages DataFiles
052 *
053 * @version $Revision: 962468 $
054 */
public class Journal {

    // Largest batch payload size the recovery scanner will accept as plausible;
    // anything larger is treated as corruption (see checkBatchRecord).
    private static final int MAX_BATCH_SIZE = 32*1024*1024;

    // RECORD_HEAD_SPACE = 4 byte record length + 1 byte record type.
    public static final int RECORD_HEAD_SPACE = 4 + 1;

    // Record type for user data records.
    public static final byte USER_RECORD_TYPE = 1;
    // Record type for batch control records.
    public static final byte BATCH_CONTROL_RECORD_TYPE = 2;
    // Magic marker embedded in every batch control record.
    public static final byte[] BATCH_CONTROL_RECORD_MAGIC = bytes("WRITE BATCH");
    // Batch Control Item holds a 4 byte size of the batch and a 8 byte checksum of the batch.
    public static final int BATCH_CONTROL_RECORD_SIZE = RECORD_HEAD_SPACE+BATCH_CONTROL_RECORD_MAGIC.length+4+8;
    // Pre-rendered prefix (length + type + magic) shared by all batch control records.
    public static final byte[] BATCH_CONTROL_RECORD_HEADER = createBatchControlRecordHeader();
069 private static byte[] createBatchControlRecordHeader() {
070 try {
071 DataByteArrayOutputStream os = new DataByteArrayOutputStream();
072 os.writeInt(BATCH_CONTROL_RECORD_SIZE);
073 os.writeByte(BATCH_CONTROL_RECORD_TYPE);
074 os.write(BATCH_CONTROL_RECORD_MAGIC);
075 ByteSequence sequence = os.toByteSequence();
076 sequence.compact();
077 return sequence.getData();
078 } catch (IOException e) {
079 throw new RuntimeException("Could not create batch control record header.");
080 }
081 }
082
    public static final String DEFAULT_DIRECTORY = ".";
    public static final String DEFAULT_ARCHIVE_DIRECTORY = "data-archive";
    public static final String DEFAULT_FILE_PREFIX = "db-";
    public static final String DEFAULT_FILE_SUFFIX = ".log";
    public static final int DEFAULT_MAX_FILE_LENGTH = 1024 * 1024 * 32;
    public static final int DEFAULT_CLEANUP_INTERVAL = 1000 * 30;
    // Slack subtracted from maxFileLength when computing preferedFileLength.
    public static final int PREFERED_DIFF = 1024 * 512;
    public static final int DEFAULT_MAX_WRITE_BATCH_SIZE = 1024 * 1024 * 4;

    private static final Log LOG = LogFactory.getLog(Journal.class);

    // Write commands handed to the appender that have not completed yet,
    // exposed via getInflightWrites() (see DataFileAppender for key semantics).
    protected final Map<WriteKey, WriteCommand> inflightWrites = new ConcurrentHashMap<WriteKey, WriteCommand>();

    protected File directory = new File(DEFAULT_DIRECTORY);
    protected File directoryArchive = new File(DEFAULT_ARCHIVE_DIRECTORY);
    protected String filePrefix = DEFAULT_FILE_PREFIX;
    protected String fileSuffix = DEFAULT_FILE_SUFFIX;
    protected boolean started;

    protected int maxFileLength = DEFAULT_MAX_FILE_LENGTH;
    protected int preferedFileLength = DEFAULT_MAX_FILE_LENGTH - PREFERED_DIFF;
    protected int writeBatchSize = DEFAULT_MAX_WRITE_BATCH_SIZE;

    protected DataFileAppender appender;
    protected DataFileAccessorPool accessorPool;

    // Data files indexed by numeric id, by File, and as an ordered linked list.
    protected Map<Integer, DataFile> fileMap = new HashMap<Integer, DataFile>();
    protected Map<File, DataFile> fileByFileMap = new LinkedHashMap<File, DataFile>();
    protected LinkedNodeList<DataFile> dataFiles = new LinkedNodeList<DataFile>();

    // Location of the last append; recovery sets it, appenders update it.
    protected final AtomicReference<Location> lastAppendLocation = new AtomicReference<Location>();
    protected Runnable cleanupTask;
    // Aggregate on-disk length of all journal files (replaceable, see setSizeAccumulator).
    protected AtomicLong totalLength = new AtomicLong();
    protected boolean archiveDataLogs;
    private ReplicationTarget replicationTarget;
    protected boolean checksum;
    protected boolean checkForCorruptionOnStartup;
    // NOTE(review): start() replaces this instance with a fresh Timer, so the one
    // created here is discarded without ever being used or cancelled — confirm
    // the field initializer is intentional.
    private Timer timer = new Timer("KahaDB Scheduler", true);
122
    /**
     * Opens the journal: scans the directory for existing data files, links them
     * together in id order, optionally recovery-checks each one, ensures a
     * current write file exists, and schedules the periodic cleanup task.
     * Idempotent while started.
     *
     * @throws IOException if the data files cannot be opened or recovered
     */
    public synchronized void start() throws IOException {
        if (started) {
            return;
        }

        long start = System.currentTimeMillis();
        accessorPool = new DataFileAccessorPool(this);
        started = true;
        preferedFileLength = Math.max(PREFERED_DIFF, getMaxFileLength() - PREFERED_DIFF);

        appender = new DataFileAppender(this);

        File[] files = directory.listFiles(new FilenameFilter() {
            public boolean accept(File dir, String n) {
                return dir.equals(directory) && n.startsWith(filePrefix) && n.endsWith(fileSuffix);
            }
        });

        if (files != null) {
            for (int i = 0; i < files.length; i++) {
                try {
                    File file = files[i];
                    String n = file.getName();
                    // The data file id is the numeric part between prefix and suffix.
                    String numStr = n.substring(filePrefix.length(), n.length()-fileSuffix.length());
                    int num = Integer.parseInt(numStr);
                    DataFile dataFile = new DataFile(file, num, preferedFileLength);
                    fileMap.put(dataFile.getDataFileId(), dataFile);
                    totalLength.addAndGet(dataFile.getLength());
                } catch (NumberFormatException e) {
                    // Ignore files that do not match the pattern.
                }
            }

            // Sort the list so that we can link the DataFiles together in the
            // right order.
            List<DataFile> l = new ArrayList<DataFile>(fileMap.values());
            Collections.sort(l);
            for (DataFile df : l) {
                dataFiles.addLast(df);
                fileByFileMap.put(df.getFile(), df);

                if( isCheckForCorruptionOnStartup() ) {
                    // Full scan: leaves lastAppendLocation at the end of the
                    // last file checked.
                    lastAppendLocation.set(recoveryCheck(df));
                }
            }
        }

        getCurrentWriteFile();

        // If we did not scan every file above, at least recovery-check the tail
        // file so we know where the next append goes.
        if( lastAppendLocation.get()==null ) {
            DataFile df = dataFiles.getTail();
            lastAppendLocation.set(recoveryCheck(df));
        }

        cleanupTask = new Runnable() {
            public void run() {
                cleanup();
            }
        };
        // A fresh Timer is created on every start so the journal can be
        // restarted after close() cancelled the previous one.
        this.timer = new Timer("KahaDB Scheduler", true);
        TimerTask task = new SchedulerTimerTask(cleanupTask);
        this.timer.scheduleAtFixedRate(task, DEFAULT_CLEANUP_INTERVAL,DEFAULT_CLEANUP_INTERVAL);
        long end = System.currentTimeMillis();
        LOG.trace("Startup took: "+(end-start)+" ms");
    }
188
189 private static byte[] bytes(String string) {
190 try {
191 return string.getBytes("UTF-8");
192 } catch (UnsupportedEncodingException e) {
193 throw new RuntimeException(e);
194 }
195 }
196
    /**
     * Scans a data file from offset 0, validating each write batch in turn, and
     * returns the location just past the last valid batch. Runs of invalid data
     * between valid batches are recorded in dataFile.corruptedBlocks; trailing
     * corruption is instead trimmed off by shortening the file's length.
     *
     * @param dataFile the data file to scan
     * @return a Location in this file positioned after the last valid batch
     * @throws IOException if an accessor cannot be opened
     */
    protected Location recoveryCheck(DataFile dataFile) throws IOException {
        Location location = new Location();
        location.setDataFileId(dataFile.getDataFileId());
        location.setOffset(0);

        DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
        try {
            while( true ) {
                int size = checkBatchRecord(reader, location.getOffset());
                if ( size>=0 ) {
                    // Valid batch: skip over its control record and payload.
                    location.setOffset(location.getOffset()+BATCH_CONTROL_RECORD_SIZE+size);
                } else {

                    // Perhaps it's just some corruption... scan through the file to find the next valid batch record. We
                    // may have subsequent valid batch records.
                    int nextOffset = findNextBatchRecord(reader, location.getOffset()+1);
                    if( nextOffset >=0 ) {
                        Sequence sequence = new Sequence(location.getOffset(), nextOffset - 1);
                        LOG.info("Corrupt journal records found in '"+dataFile.getFile()+"' between offsets: "+sequence);
                        dataFile.corruptedBlocks.add(sequence);
                        location.setOffset(nextOffset);
                    } else {
                        break;
                    }
                }
            }

        } catch (IOException e) {
            // A read past the end of the written data ends the scan; the offset
            // reached so far is the result.
        } finally {
            accessorPool.closeDataFileAccessor(reader);
        }

        // Trim the tracked file length down to the end of the last valid batch,
        // and keep the journal's total length in sync.
        int existingLen = dataFile.getLength();
        dataFile.setLength(location.getOffset());
        if (existingLen > dataFile.getLength()) {
            totalLength.addAndGet(dataFile.getLength() - existingLen);
        }

        if( !dataFile.corruptedBlocks.isEmpty() ) {
            // Is the end of the data file corrupted?
            if( dataFile.corruptedBlocks.getTail().getLast()+1 == location.getOffset() ) {
                dataFile.setLength((int) dataFile.corruptedBlocks.removeLastSequence().getFirst());
            }
        }

        return location;
    }
244
    /**
     * Scans forward from {@code offset} for the next batch control record
     * header.
     *
     * @return the absolute offset of the next header, or -1 if none is found
     *         before the end of the written data
     * @throws IOException if reading fails
     */
    private int findNextBatchRecord(DataFileAccessor reader, int offset) throws IOException {
        ByteSequence header = new ByteSequence(BATCH_CONTROL_RECORD_HEADER);
        byte data[] = new byte[1024*4];
        ByteSequence bs = new ByteSequence(data, 0, reader.read(offset, data));

        int pos = 0;
        while( true ) {
            pos = bs.indexOf(header, pos);
            if( pos >= 0 ) {
                return offset+pos;
            } else {
                // need to load the next data chunk in..
                if( bs.length != data.length ) {
                    // If we had a short read then we were at EOF
                    return -1;
                }
                // Overlap consecutive chunks by the header length so a header
                // straddling a chunk boundary is still found.
                offset += bs.length-BATCH_CONTROL_RECORD_HEADER.length;
                bs = new ByteSequence(data, 0, reader.read(offset, data));
                pos=0;
            }
        }
    }
267
268
269 public int checkBatchRecord(DataFileAccessor reader, int offset) throws IOException {
270 byte controlRecord[] = new byte[BATCH_CONTROL_RECORD_SIZE];
271 DataByteArrayInputStream controlIs = new DataByteArrayInputStream(controlRecord);
272
273 reader.readFully(offset, controlRecord);
274
275 // Assert that it's a batch record.
276 for( int i=0; i < BATCH_CONTROL_RECORD_HEADER.length; i++ ) {
277 if( controlIs.readByte() != BATCH_CONTROL_RECORD_HEADER[i] ) {
278 return -1;
279 }
280 }
281
282 int size = controlIs.readInt();
283 if( size > MAX_BATCH_SIZE ) {
284 return -1;
285 }
286
287 if( isChecksum() ) {
288
289 long expectedChecksum = controlIs.readLong();
290 if( expectedChecksum == 0 ) {
291 // Checksuming was not enabled when the record was stored.
292 // we can't validate the record :(
293 return size;
294 }
295
296 byte data[] = new byte[size];
297 reader.readFully(offset+BATCH_CONTROL_RECORD_SIZE, data);
298
299 Checksum checksum = new Adler32();
300 checksum.update(data, 0, data.length);
301
302 if( expectedChecksum!=checksum.getValue() ) {
303 return -1;
304 }
305
306 }
307 return size;
308 }
309
310
311 void addToTotalLength(int size) {
312 totalLength.addAndGet(size);
313 }
314
315
316 synchronized DataFile getCurrentWriteFile() throws IOException {
317 if (dataFiles.isEmpty()) {
318 rotateWriteFile();
319 }
320 return dataFiles.getTail();
321 }
322
323 synchronized DataFile rotateWriteFile() {
324 int nextNum = !dataFiles.isEmpty() ? dataFiles.getTail().getDataFileId().intValue() + 1 : 1;
325 File file = getFile(nextNum);
326 DataFile nextWriteFile = new DataFile(file, nextNum, preferedFileLength);
327 // actually allocate the disk space
328 fileMap.put(nextWriteFile.getDataFileId(), nextWriteFile);
329 fileByFileMap.put(file, nextWriteFile);
330 dataFiles.addLast(nextWriteFile);
331 return nextWriteFile;
332 }
333
334 public File getFile(int nextNum) {
335 String fileName = filePrefix + nextNum + fileSuffix;
336 File file = new File(directory, fileName);
337 return file;
338 }
339
340 synchronized DataFile getDataFile(Location item) throws IOException {
341 Integer key = Integer.valueOf(item.getDataFileId());
342 DataFile dataFile = fileMap.get(key);
343 if (dataFile == null) {
344 LOG.error("Looking for key " + key + " but not found in fileMap: " + fileMap);
345 throw new IOException("Could not locate data file " + getFile(item.getDataFileId()));
346 }
347 return dataFile;
348 }
349
350 synchronized File getFile(Location item) throws IOException {
351 Integer key = Integer.valueOf(item.getDataFileId());
352 DataFile dataFile = fileMap.get(key);
353 if (dataFile == null) {
354 LOG.error("Looking for key " + key + " but not found in fileMap: " + fileMap);
355 throw new IOException("Could not locate data file " + getFile(item.getDataFileId()));
356 }
357 return dataFile.getFile();
358 }
359
    // Successor of dataFile in the linked list; callers treat null as "no next file".
    private DataFile getNextDataFile(DataFile dataFile) {
        return dataFile.getNext();
    }
363
    /**
     * Shuts the journal down: cancels the cleanup timer, closes the accessor
     * pool and appender, and drops all in-memory file indexes. A no-op when not
     * started; start() may be called again afterwards (it recreates the timer,
     * pool and appender).
     *
     * @throws IOException if closing the pool or appender fails
     */
    public synchronized void close() throws IOException {
        if (!started) {
            return;
        }
        if (this.timer != null) {
            this.timer.cancel();
        }
        accessorPool.close();
        appender.close();
        fileMap.clear();
        fileByFileMap.clear();
        dataFiles.clear();
        lastAppendLocation.set(null);
        started = false;
    }
379
    // Periodic task (scheduled by start()): releases pooled accessors that are
    // no longer in use. Null-safe so it can fire before/after start/close.
    synchronized void cleanup() {
        if (accessorPool != null) {
            accessorPool.disposeUnused();
        }
    }
385
386 public synchronized boolean delete() throws IOException {
387
388 // Close all open file handles...
389 appender.close();
390 accessorPool.close();
391
392 boolean result = true;
393 for (Iterator<DataFile> i = fileMap.values().iterator(); i.hasNext();) {
394 DataFile dataFile = i.next();
395 totalLength.addAndGet(-dataFile.getLength());
396 result &= dataFile.delete();
397 }
398 fileMap.clear();
399 fileByFileMap.clear();
400 lastAppendLocation.set(null);
401 dataFiles = new LinkedNodeList<DataFile>();
402
403 // reopen open file handles...
404 accessorPool = new DataFileAccessorPool(this);
405 appender = new DataFileAppender(this);
406 return result;
407 }
408
409 public synchronized void removeDataFiles(Set<Integer> files) throws IOException {
410 for (Integer key : files) {
411 // Can't remove the data file (or subsequent files) that is currently being written to.
412 if( key >= lastAppendLocation.get().getDataFileId() ) {
413 continue;
414 }
415 DataFile dataFile = fileMap.get(key);
416 if( dataFile!=null ) {
417 forceRemoveDataFile(dataFile);
418 }
419 }
420 }
421
422 private synchronized void forceRemoveDataFile(DataFile dataFile) throws IOException {
423 accessorPool.disposeDataFileAccessors(dataFile);
424 fileByFileMap.remove(dataFile.getFile());
425 fileMap.remove(dataFile.getDataFileId());
426 totalLength.addAndGet(-dataFile.getLength());
427 dataFile.unlink();
428 if (archiveDataLogs) {
429 dataFile.move(getDirectoryArchive());
430 LOG.debug("moved data file " + dataFile + " to " + getDirectoryArchive());
431 } else {
432 if ( dataFile.delete() ) {
433 LOG.debug("Discarded data file " + dataFile);
434 } else {
435 LOG.warn("Failed to discard data file " + dataFile.getFile());
436 }
437 }
438 }
439
    /**
     * @return the maximum length a data file may grow to
     */
    public int getMaxFileLength() {
        return maxFileLength;
    }

    /**
     * @param maxFileLength the maximum data file length to use; read by start()
     *        when computing preferedFileLength
     */
    public void setMaxFileLength(int maxFileLength) {
        this.maxFileLength = maxFileLength;
    }

    /** Renders the journal as its directory path. */
    @Override
    public String toString() {
        return directory.toString();
    }
458
    /**
     * Accounts for a record appended to the journal by an external writer
     * rather than through this journal's appender: either grows the current
     * tail file or registers the next file in sequence.
     *
     * @param loc    where the external append landed
     * @param length number of bytes appended
     * @throws IOException if loc is not in the tail file or its direct successor
     */
    public synchronized void appendedExternally(Location loc, int length) throws IOException {
        DataFile dataFile = null;
        if( dataFiles.getTail().getDataFileId() == loc.getDataFileId() ) {
            // It's an update to the current log file..
            dataFile = dataFiles.getTail();
            dataFile.incrementLength(length);
        } else if( dataFiles.getTail().getDataFileId()+1 == loc.getDataFileId() ) {
            // It's an update to the next log file.
            int nextNum = loc.getDataFileId();
            File file = getFile(nextNum);
            dataFile = new DataFile(file, nextNum, preferedFileLength);
            // actually allocate the disk space
            fileMap.put(dataFile.getDataFileId(), dataFile);
            fileByFileMap.put(file, dataFile);
            dataFiles.addLast(dataFile);
            // NOTE(review): unlike the first branch, incrementLength(length) is
            // not called on the new file here — confirm the appended length is
            // accounted for elsewhere.
        } else {
            throw new IOException("Invalid external append.");
        }
    }
478
    /**
     * Returns the location of the next user record after {@code location},
     * skipping over non-user records and crossing data-file boundaries as
     * needed. Pass null to start from the head of the journal.
     *
     * @return the next user record's location, or null when the end of the
     *         written data is reached
     * @throws IOException if a referenced data file cannot be read
     */
    public synchronized Location getNextLocation(Location location) throws IOException, IllegalStateException {

        Location cur = null;
        while (true) {
            if (cur == null) {
                if (location == null) {
                    // First iteration, no starting point: begin at offset 0 of
                    // the first data file.
                    DataFile head = dataFiles.getHead();
                    if( head == null ) {
                        return null;
                    }
                    cur = new Location();
                    cur.setDataFileId(head.getDataFileId());
                    cur.setOffset(0);
                } else {
                    // Set to the next offset..
                    if (location.getSize() == -1) {
                        // Size unknown: re-read details at the same offset below.
                        cur = new Location(location);
                    } else {
                        cur = new Location(location);
                        cur.setOffset(location.getOffset() + location.getSize());
                    }
                }
            } else {
                // Later iterations: step over the record just inspected.
                cur.setOffset(cur.getOffset() + cur.getSize());
            }

            DataFile dataFile = getDataFile(cur);

            // Did it go into the next file??
            if (dataFile.getLength() <= cur.getOffset()) {
                dataFile = getNextDataFile(dataFile);
                if (dataFile == null) {
                    return null;
                } else {
                    cur.setDataFileId(dataFile.getDataFileId().intValue());
                    cur.setOffset(0);
                }
            }

            // Load in location size and type.
            DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
            try {
                reader.readLocationDetails(cur);
            } finally {
                accessorPool.closeDataFileAccessor(reader);
            }

            if (cur.getType() == 0) {
                // A zero type is treated as end-of-data.
                return null;
            } else if (cur.getType() == USER_RECORD_TYPE) {
                // Only return user records.
                return cur;
            }
        }
    }
534
    /**
     * Convenience overload: resolves {@code file} to its DataFile and delegates.
     * NOTE(review): if file is not in fileByFileMap, df is null and the delegate
     * will NPE when lastLocation is null — confirm callers only pass known files.
     */
    public synchronized Location getNextLocation(File file, Location lastLocation, boolean thisFileOnly) throws IllegalStateException, IOException {
        DataFile df = fileByFileMap.get(file);
        return getNextLocation(df, lastLocation, thisFileOnly);
    }
539
    /**
     * Returns the location of the next record after {@code lastLocation} within
     * (or, when {@code thisFileOnly} is false, after) the given data file.
     *
     * @param dataFile     the file to scan
     * @param lastLocation the previous record, or null to start at the head node
     * @param thisFileOnly when true, stop at the end of dataFile instead of
     *                     moving to the next file
     * @return the next record's location, or null at end of data
     * @throws IOException if a data file cannot be read
     */
    public synchronized Location getNextLocation(DataFile dataFile, Location lastLocation, boolean thisFileOnly) throws IOException, IllegalStateException {

        Location cur = null;
        while (true) {
            if (cur == null) {
                if (lastLocation == null) {
                    DataFile head = dataFile.getHeadNode();
                    cur = new Location();
                    cur.setDataFileId(head.getDataFileId());
                    cur.setOffset(0);
                } else {
                    // Set to the next offset..
                    cur = new Location(lastLocation);
                    cur.setOffset(cur.getOffset() + cur.getSize());
                }
            } else {
                cur.setOffset(cur.getOffset() + cur.getSize());
            }

            // Did it go into the next file??
            if (dataFile.getLength() <= cur.getOffset()) {
                if (thisFileOnly) {
                    return null;
                } else {
                    dataFile = getNextDataFile(dataFile);
                    if (dataFile == null) {
                        return null;
                    } else {
                        cur.setDataFileId(dataFile.getDataFileId().intValue());
                        cur.setOffset(0);
                    }
                }
            }

            // Load in location size and type.
            DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
            try {
                reader.readLocationDetails(cur);
            } finally {
                accessorPool.closeDataFileAccessor(reader);
            }

            if (cur.getType() == 0) {
                return null;
            } else if (cur.getType() > 0) {
                // Only return user records.
                // NOTE(review): unlike getNextLocation(Location), this accepts
                // any positive type (including batch control records), not just
                // USER_RECORD_TYPE — confirm the difference is intentional.
                return cur;
            }
        }
    }
590
591 public synchronized ByteSequence read(Location location) throws IOException, IllegalStateException {
592 DataFile dataFile = getDataFile(location);
593 DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
594 ByteSequence rc = null;
595 try {
596 rc = reader.readRecord(location);
597 } finally {
598 accessorPool.closeDataFileAccessor(reader);
599 }
600 return rc;
601 }
602
603 public Location write(ByteSequence data, boolean sync) throws IOException, IllegalStateException {
604 Location loc = appender.storeItem(data, Location.USER_TYPE, sync);
605 return loc;
606 }
607
608 public Location write(ByteSequence data, Runnable onComplete) throws IOException, IllegalStateException {
609 Location loc = appender.storeItem(data, Location.USER_TYPE, onComplete);
610 return loc;
611 }
612
613 public void update(Location location, ByteSequence data, boolean sync) throws IOException {
614 DataFile dataFile = getDataFile(location);
615 DataFileAccessor updater = accessorPool.openDataFileAccessor(dataFile);
616 try {
617 updater.updateRecord(location, data, sync);
618 } finally {
619 accessorPool.closeDataFileAccessor(updater);
620 }
621 }
622
    public File getDirectory() {
        return directory;
    }

    public void setDirectory(File directory) {
        this.directory = directory;
    }

    public String getFilePrefix() {
        return filePrefix;
    }

    public void setFilePrefix(String filePrefix) {
        this.filePrefix = filePrefix;
    }

    // In-flight write commands keyed by WriteKey (see DataFileAppender).
    public Map<WriteKey, WriteCommand> getInflightWrites() {
        return inflightWrites;
    }

    public Location getLastAppendLocation() {
        return lastAppendLocation.get();
    }

    public void setLastAppendLocation(Location lastSyncedLocation) {
        this.lastAppendLocation.set(lastSyncedLocation);
    }

    public File getDirectoryArchive() {
        return directoryArchive;
    }

    public void setDirectoryArchive(File directoryArchive) {
        this.directoryArchive = directoryArchive;
    }

    public boolean isArchiveDataLogs() {
        return archiveDataLogs;
    }

    // When true, removed data files are moved to directoryArchive instead of
    // being deleted (see forceRemoveDataFile).
    public void setArchiveDataLogs(boolean archiveDataLogs) {
        this.archiveDataLogs = archiveDataLogs;
    }
666
667 synchronized public Integer getCurrentDataFileId() {
668 if (dataFiles.isEmpty())
669 return null;
670 return dataFiles.getTail().getDataFileId();
671 }
672
    /**
     * Get a set of files - only valid after start()
     *
     * @return files currently being used; a live view of the internal map's keys
     */
    public Set<File> getFiles() {
        return fileByFileMap.keySet();
    }

    /** @return a snapshot of the id-to-DataFile map, sorted by data file id. */
    public Map<Integer, DataFile> getFileMap() {
        return new TreeMap<Integer, DataFile>(fileMap);
    }
685
686 public long getDiskSize() {
687 long tailLength=0;
688 synchronized( this ) {
689 if( !dataFiles.isEmpty() ) {
690 tailLength = dataFiles.getTail().getLength();
691 }
692 }
693
694 long rc = totalLength.get();
695
696 // The last file is actually at a minimum preferedFileLength big.
697 if( tailLength < preferedFileLength ) {
698 rc -= tailLength;
699 rc += preferedFileLength;
700 }
701 return rc;
702 }
703
    public void setReplicationTarget(ReplicationTarget replicationTarget) {
        this.replicationTarget = replicationTarget;
    }

    public ReplicationTarget getReplicationTarget() {
        return replicationTarget;
    }

    public String getFileSuffix() {
        return fileSuffix;
    }

    public void setFileSuffix(String fileSuffix) {
        this.fileSuffix = fileSuffix;
    }

    // When true, batch checksums are validated during recovery (see checkBatchRecord).
    public boolean isChecksum() {
        return checksum;
    }

    public void setChecksum(boolean checksumWrites) {
        this.checksum = checksumWrites;
    }

    // When true, start() recovery-checks every data file, not just the tail.
    public boolean isCheckForCorruptionOnStartup() {
        return checkForCorruptionOnStartup;
    }

    public void setCheckForCorruptionOnStartup(boolean checkForCorruptionOnStartup) {
        this.checkForCorruptionOnStartup = checkForCorruptionOnStartup;
    }

    public void setWriteBatchSize(int writeBatchSize) {
        this.writeBatchSize = writeBatchSize;
    }

    public int getWriteBatchSize() {
        return writeBatchSize;
    }

    // Replaces the internal total-length counter with an externally supplied
    // accumulator, so the owner can observe/share the journal's size.
    public void setSizeAccumulator(AtomicLong storeSizeAccumulator) {
        this.totalLength = storeSizeAccumulator;
    }
}