/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;

import com.google.common.collect.ImmutableCollection;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.classification.InterfaceAudience;

/**
 * Manages the store files and basic metadata about them that determines the logical structure
 * (e.g. what files to return for a scan, how to determine the split point, and so on).
 * Does NOT affect the physical structure of files in HDFS.
 * Example alternative structures: the default list of files ordered by seqNum; a LevelDB-like
 * structure sorted by level and seqNum.
 *
 * Implementations are not assumed to be thread safe.
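 *
 * <p>A rough lifecycle sketch (illustrative only; the setup around the interface calls is
 * hypothetical):
 * <pre>{@code
 *   StoreFileManager sfm = ...; // some implementation
 *   sfm.loadFiles(initialFiles);               // once, when the store opens
 *   sfm.insertNewFiles(flushedFiles);          // after a MemStore flush or bulk load
 *   sfm.addCompactionResults(inputs, outputs); // after a compaction finishes
 *   ImmutableCollection<StoreFile> remaining = sfm.clearFiles(); // when the store closes
 * }</pre>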
 */
@InterfaceAudience.Private
public interface StoreFileManager {
  /**
   * Loads the initial store files into an empty StoreFileManager.
   * @param storeFiles The files to load.
   */
  void loadFiles(List<StoreFile> storeFiles);

  /**
   * Adds new files, either from a MemStore flush or a bulk load, into the structure.
   * @param sfs New store files.
   */
  void insertNewFiles(Collection<StoreFile> sfs) throws IOException;

  /**
   * Adds compaction results into the structure.
   * @param compactedFiles The input files for the compaction.
   * @param results The resulting files for the compaction.
   */
  void addCompactionResults(
      Collection<StoreFile> compactedFiles, Collection<StoreFile> results) throws IOException;

  /**
   * Clears all the files currently in use and returns them.
   * @return The files previously in use.
   */
  ImmutableCollection<StoreFile> clearFiles();

  /**
   * Gets a snapshot of the store files currently in use. Useful for things like metrics and
   * checks; callers should not assume anything about relations between the store files in
   * the list.
   * @return The list of StoreFiles.
   */
  Collection<StoreFile> getStorefiles();

  /**
   * Returns the number of files currently in use.
   * @return The number of files.
   */
  int getStorefileCount();

  /**
   * Gets the store files to scan for a Scan or Get request.
   * @param isGet Whether this is a Get request.
   * @param startRow Start row of the request.
   * @param stopRow Stop row of the request.
   * @return The list of files that are to be read for this request.
   */
  Collection<StoreFile> getFilesForScanOrGet(
      boolean isGet, byte[] startRow, byte[] stopRow
  );

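  // An illustrative caller-side sketch (hypothetical; not part of this interface). A Get
  // probes a single row, so it passes the same row as both start and stop:
  //
  //   byte[] row = Bytes.toBytes("r1");
  //   Collection<StoreFile> files = manager.getFilesForScanOrGet(true, row, row);
  //   // For a Scan, pass isGet = false with the scan's start and stop rows.
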
  /**
   * Gets the initial, full list of candidate store files to check for row-key-before.
   * @param targetKey The key that is the basis of the search.
   * @return The files that may contain a key less than or equal to targetKey, in reverse
   *         order of newness, with preference for the target key.
   */
  Iterator<StoreFile> getCandidateFilesForRowKeyBefore(
      KeyValue targetKey
  );

  /**
   * Updates the candidate list for finding row key before. Given the list of candidates
   * remaining to check from getCandidateFilesForRowKeyBefore, the targetKey, and the current
   * best candidate, this may trim and reorder the list to remove files in which a better
   * candidate cannot be found.
   * @param candidateFiles The candidate files not yet checked for better candidates - the
   *                       return value from {@link #getCandidateFilesForRowKeyBefore(KeyValue)},
   *                       with some files already removed.
   * @param targetKey The key to search for.
   * @param candidate The current best candidate found.
   * @return The list to replace candidateFiles.
   */
  Iterator<StoreFile> updateCandidateFilesForRowKeyBefore(
      Iterator<StoreFile> candidateFiles, KeyValue targetKey, Cell candidate
  );
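  // An illustrative sketch of the row-key-before lookup protocol formed by the two methods
  // above (hypothetical caller code; seekBefore and isBetterCandidate are assumed helpers):
  //
  //   Iterator<StoreFile> files = manager.getCandidateFilesForRowKeyBefore(targetKey);
  //   Cell best = null;
  //   while (files.hasNext()) {
  //     StoreFile sf = files.next();
  //     Cell found = seekBefore(sf, targetKey);
  //     if (found != null && isBetterCandidate(found, best)) {
  //       best = found;
  //       files = manager.updateCandidateFilesForRowKeyBefore(files, targetKey, best);
  //     }
  //   }
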
  /**
   * Gets the split point for the split of this set of store files (approx. middle).
   * @return The mid-point, or null if no split is possible.
   * @throws IOException if an error occurs while determining the split point.
   */
  byte[] getSplitPoint() throws IOException;

  /**
   * @return The store compaction priority.
   */
  int getStoreCompactionPriority();

  /**
   * @param maxTs Maximum expired timestamp.
   * @param filesCompacting Files that are currently being compacted.
   * @return The files which no longer contain any needed data, according to TTL and other
   *         criteria.
   */
  Collection<StoreFile> getUnneededFiles(long maxTs, List<StoreFile> filesCompacting);

  /**
   * @return the compaction pressure used for compaction throughput tuning.
   * @see Store#getCompactionPressure()
   */
  double getCompactionPressure();

  /**
   * @return the comparator used to sort storefiles. Usually,
   *         {@link StoreFile#getMaxSequenceId()} is the primary sort key.
   */
  Comparator<StoreFile> getStoreFileComparator();
}