Clover coverage report - HashFiler.java
Coverage timestamp: Sun Nov 1 2009 23:08:24 UTC
File stats:  LOC: 505   NCLOC: 332   Methods: 37   Classes: 4
 
 Source file      Conditionals   Statements   Methods   TOTAL
 HashFiler.java   72%            84.4%        83.8%     82%
 1    /*
 2    * Licensed to the Apache Software Foundation (ASF) under one or more
 3    * contributor license agreements. See the NOTICE file distributed with
 4    * this work for additional information regarding copyright ownership.
 5    * The ASF licenses this file to You under the Apache License, Version 2.0
 6    * (the "License"); you may not use this file except in compliance with
 7    * the License. You may obtain a copy of the License at
 8    *
 9    * http://www.apache.org/licenses/LICENSE-2.0
 10    *
 11    * Unless required by applicable law or agreed to in writing, software
 12    * distributed under the License is distributed on an "AS IS" BASIS,
 13    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 14    * See the License for the specific language governing permissions and
 15    * limitations under the License.
 16    *
 17    * $Id: HashFiler.java 571938 2007-09-02 10:14:13Z vgritsenko $
 18    */
 19   
 20    package org.apache.xindice.core.filer;
 21   
 22    import org.apache.commons.logging.Log;
 23    import org.apache.commons.logging.LogFactory;
 24    import org.apache.xindice.core.DBException;
 25    import org.apache.xindice.core.FaultCodes;
 26    import org.apache.xindice.core.data.Key;
 27    import org.apache.xindice.core.data.Record;
 28    import org.apache.xindice.core.data.RecordSet;
 29    import org.apache.xindice.core.data.Value;
 30    import org.apache.xindice.util.Configuration;
 31   
 32    import java.io.DataInput;
 33    import java.io.DataOutput;
 34    import java.io.File;
 35    import java.io.IOException;
 36    import java.io.RandomAccessFile;
 37    import java.util.ArrayList;
 38    import java.util.HashMap;
 39    import java.util.Iterator;
 40    import java.util.List;
 41   
 42    /**
 43    * HashFiler is a Filer implementation based on the Paged class. By
 44    * extending Paged, HashFiler inherits the ability to maintain Record
 45    * metadata such as creation and modification time. It also provides
 46    * considerably more flexibility in retrieving blocks of
 47    * data and allocating Record space.
 48    *
 49    * <br/>
 50    * HashFiler has the following configuration attributes:
 51    * <ul>
 52    * <li><strong>pagesize</strong>: Size of the page used by the paged file.
 53    * The default page size is 4096 bytes. This parameter can be set only
 54    * before the paged file is created; once created, it
 55    * cannot be changed.</li>
 56    * <li><strong>pagecount</strong>: This parameter has a special meaning
 57    * for HashFiler: it determines the size of the hash table's main
 58    * storage, which is equal to the number of pages the filer will be
 59    * created with. The default is 1024. Note that if it is made
 60    * too small, the efficiency of the hash table will suffer.</li>
 61    * <li><strong>maxkeysize</strong>: Maximum allowed size of the key.
 62    * Default maximum key size is 256 bytes.</li>
 63    * <li><strong>max-descriptors</strong>: Defines the maximum number of
 64    * simultaneously open file descriptors this paged file can have.
 65    * Several descriptors are needed to provide multithreaded access
 66    * to the underlying file. Too large a number will limit the number of
 67    * collections you can open. The default value is 16
 68    * (DEFAULT_DESCRIPTORS_MAX).</li>
 69    * </ul>
 70    *
 71    * @version $Revision: 571938 $, $Date: 2007-09-02 03:14:13 -0700 (Sun, 02 Sep 2007) $
 72    */
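    /*
     * A minimal usage sketch, not part of the original file. It illustrates the
     * configuration attributes listed above and a basic write/read cycle. The
     * exists()/create()/open()/close() lifecycle calls are assumed to come from
     * the Filer interface and the Paged base class, and the Configuration is
     * assumed to wrap an XML element such as:
     *
     *   <filer pagesize="4096" pagecount="1024" maxkeysize="256" max-descriptors="16"/>
     *
     * Hypothetical driver code:
     *
     *   HashFiler filer = new HashFiler();
     *   filer.setLocation(new File("/tmp/db"), "records"); // backing file records.tbl
     *   filer.setConfig(config);                           // pagesize, pagecount, ...
     *   if (!filer.exists()) {
     *       filer.create();
     *   }
     *   filer.open();
     *   Key key = new Key("doc1");
     *   filer.writeRecord(key, new Value("<data/>"));
     *   Record rec = filer.readRecord(key);
     *   filer.close();
     */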
 73    public class HashFiler extends Paged
 74    implements Filer {
 75   
 76    private static final Log log = LogFactory.getLog(HashFiler.class);
 77   
 78    /**
 79    * Record page status
 80    */
 81    protected static final byte RECORD = 1;
 82   
 83    private HashFileHeader fileHeader;
 84   
 85   
 86  19 public HashFiler() {
 87  19 super();
 88  19 fileHeader = (HashFileHeader) getFileHeader();
 89    }
 90   
 91  19 public void setLocation(File root, String location) {
 92  19 setFile(new File(root, location + ".tbl"));
 93    }
 94   
 95  19 public String getName() {
 96  19 return "HashFiler";
 97    }
 98   
 99  19 public void setConfig(Configuration config) {
 100  19 super.setConfig(config);
 101    // Since pageCount is used as the hash table size, all pageCount pages
 102    // are considered in use, so set totalCount to pageCount.
 103  19 fileHeader.setTotalCount(fileHeader.getPageCount());
 104    }
 105   
 106  1961 private Page seekRecordPage(Key key) throws IOException {
 107  1961 int hash = key.hashCode();
 108  1961 long pageNum = hash % fileHeader.getPageCount();
 109  1961 Page p = getPage(pageNum);
 110  1961 synchronized (p) {
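            // Walk the collision chain starting at the bucket page; the key
            // hash cached in each page header rejects most non-matching pages
            // without a full key comparison.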
 111  1961 while (true) {
 112  25421 HashPageHeader ph = (HashPageHeader) p.getPageHeader();
 113  25421 if (ph.getStatus() == RECORD && ph.getKeyHash() == hash && p.getKey().equals(key)) {
 114  1958 return p;
 115    }
 116   
 117  23463 pageNum = ph.getNextCollision();
 118  23463 if (pageNum == NO_PAGE) {
 119  3 return null;
 120    }
 121  23460 p = getPage(pageNum);
 122    }
 123    }
 124    }
 125   
 126  0 public Record readRecord(Key key) throws DBException {
 127  0 return readRecord(key, false);
 128    }
 129   
 130  1963 public Record readRecord(Key key, boolean metaOnly) throws DBException {
 131  1963 if (key == null || key.getLength() == 0) {
 132  2 return null;
 133    }
 134  1961 checkOpened();
 135  1961 try {
 136  1961 Page startPage = seekRecordPage(key);
 137  1961 if (startPage != null) {
 138  1958 Value v = metaOnly ? null : readValue(startPage);
 139  1958 HashPageHeader sph = (HashPageHeader) startPage.getPageHeader();
 140   
 141  1958 HashMap meta = new HashMap(3);
 142  1958 meta.put(Record.CREATED, new Long(sph.getCreated()));
 143  1958 meta.put(Record.MODIFIED, new Long(sph.getModified()));
 144   
 145  1958 return new Record(key, v, meta);
 146    }
 147    } catch (Exception e) {
 148  0 if (log.isWarnEnabled()) {
 149  0 log.warn("ignored exception", e);
 150    }
 151    }
 152  3 return null;
 153    }
 154   
 155  3017 private Page seekInsertionPage(Key key) throws IOException {
 156    // Calculate hash and retrieve chain head page
 157  3017 int hash = key.hashCode();
 158  3016 Page p = getPage(hash % fileHeader.getPageCount());
 159   
 160    // Synchronize by chain head page
 161  3017 synchronized (p) {
 162  3017 HashPageHeader ph;
 163  3017 while (true) {
 164  29456 ph = (HashPageHeader) p.getPageHeader();
 165  29455 if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED
 166    || (ph.getStatus() == RECORD && ph.getKeyHash() == hash && p.getKey().equals(key))) {
 167    // Found free page
 168  315 break;
 169    }
 170   
 171    // Check the chain
 172  29141 long pageNum = ph.getNextCollision();
 173  29141 if (pageNum == NO_PAGE) {
 174    // Reached end of chain, add new page
 175  2702 Page np = getFreePage();
 176   
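                    // Link the new page onto the current chain tail and persist
                    // that pointer before switching to the new page.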
 177  2701 ph.setNextCollision(np.getPageNum());
 178  2702 p.write();
 179   
 180  2702 p = np;
 181  2702 ph = (HashPageHeader) p.getPageHeader();
 182  2702 ph.setNextCollision(NO_PAGE);
 183  2702 break;
 184    }
 185   
 186    // Go to the next page in chain
 187  26439 p = getPage(pageNum);
 188    }
 189   
 190    // At this point we have the page that will hold the record
 191  3017 long t = System.currentTimeMillis();
 192  3017 if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED) {
 193    // This is a new Record
 194  2968 fileHeader.incRecordCount();
 195  2968 ph.setCreated(t);
 196    }
 197  3017 ph.setModified(t);
 198  3017 ph.setStatus(RECORD);
 199    }
 200   
 201  3017 return p;
 202    }
 203   
 204  3019 public Record writeRecord(Key key, Value value) throws DBException {
 205    // Check that key is not larger than space on the page
 206  3019 if (key == null || key.getLength() == 0 || key.getLength() > fileHeader.getPageSize() - fileHeader.getPageHeaderSize()) {
 207  2 throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Invalid key: '" + key + "'");
 208    }
 209  3018 if (value == null) {
 210  1 throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Invalid null value");
 211    }
 212  3017 checkOpened();
 213  3017 Page p = null;
 214  3017 try {
 215  3017 p = seekInsertionPage(key);
 216  3015 p.setKey(key);
 217  3016 writeValue(p, value);
 218    } catch (Exception e) {
 219    // FIXME This is not enough: by this point, a new page could already have been linked into the chain
 220  0 if (p != null) {
 221  0 p.getPageHeader().setStatus(DELETED);
 222  0 try {
 223  0 p.write();
 224    } catch (IOException ignored) {
 225    // Double exception
 226    }
 227    }
 228   
 229  0 throw new FilerException(FaultCodes.DBE_CANNOT_CREATE, "Exception: " + e, e);
 230    }
 231   
 232  3017 flush();
 233   
 234  3017 HashPageHeader ph = (HashPageHeader) p.getPageHeader();
 235  3016 HashMap meta = new HashMap(3);
 236  3017 meta.put(Record.CREATED, new Long(ph.getCreated()));
 237  3017 meta.put(Record.MODIFIED, new Long(ph.getModified()));
 238  3017 return new Record(key, value, meta);
 239    }
 240   
 241    /**
 242    * Mark pages in the primary store as 'DELETED', and let Paged handle all
 243    * overflow pages.
 244    */
 245  1009 protected void unlinkPages(Page page) throws IOException {
 246    // Handle the page if it's in primary space by setting its status to
 247    // DELETED and freeing any overflow pages linked to it.
 248  1009 if (page.getPageNum() < fileHeader.getPageCount()) {
 249  133 long nextPage = page.getPageHeader().getNextPage();
 250  133 page.getPageHeader().setStatus(DELETED);
 251  133 page.getPageHeader().setNextPage(NO_PAGE);
 252  133 page.write();
 253   
 254    // If there are no chained pages, we are done.
 255  133 if (nextPage == NO_PAGE) {
 256  133 return;
 257    }
 258   
 259    // Free the chained pages from the page that was just removed
 260  0 page = getPage(nextPage);
 261    }
 262   
 263  876 super.unlinkPages(page);
 264    }
 265   
 266  1012 public boolean deleteRecord(Key key) throws DBException {
 267  1012 if (key == null || key.getLength() == 0) {
 268  2 return false;
 269    }
 270  1010 checkOpened();
 271  1010 try {
 272  1010 int hash = key.hashCode();
 273  1010 long pageNum = hash % fileHeader.getPageCount();
 274   
 275  1010 Page page = getPage(pageNum);
 276  1010 synchronized (page) {
 277  1010 HashPageHeader prevHead = null;
 278  1010 HashPageHeader pageHead;
 279   
 280  1010 Page prev = null;
 281  1010 while (true) {
 282  1886 pageHead = (HashPageHeader) page.getPageHeader();
 283  1886 if (pageHead.getStatus() == RECORD && pageHead.getKeyHash() == hash && page.getKey().equals(key)) {
 284  1009 break;
 285    }
 286   
 287  877 pageNum = pageHead.getNextCollision();
 288  877 if (pageNum == NO_PAGE) {
 289  1 return false;
 290    }
 291  876 prev = page;
 292  876 prevHead = pageHead;
 293  876 page = getPage(pageNum);
 294    }
 295   
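                // Splice the matched page out of the collision chain; if it is
                // the bucket head itself (prev == null), it stays in the table
                // and unlinkPages() below merely marks it DELETED.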
 296  1009 if (prev != null) {
 297  876 prevHead.setNextCollision(pageHead.nextCollision);
 298  876 pageHead.setNextCollision(NO_PAGE);
 299  876 prev.write();
 300    }
 301   
 302  1009 unlinkPages(page);
 303    }
 304   
 305  1009 fileHeader.decRecordCount();
 306  1009 flush();
 307   
 308  1009 return true;
 309    } catch (Exception e) {
 310  0 if (log.isWarnEnabled()) {
 311  0 log.warn("ignored exception", e);
 312    }
 313    }
 314  0 return false;
 315    }
 316   
 317  46 public long getRecordCount() throws DBException {
 318  46 checkOpened();
 319  46 return fileHeader.getRecordCount();
 320    }
 321   
 322  23 public RecordSet getRecordSet() throws DBException {
 323  23 checkOpened();
 324  23 return new HashFilerRecordSet();
 325    }
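    // A hedged iteration sketch, not part of the original file, using only the
    // RecordSet methods implemented below. Keys are snapshotted when the record
    // set is constructed; each record is then re-read by key:
    //
    //   RecordSet set = filer.getRecordSet();
    //   while (set.hasMoreRecords()) {
    //       Record rec = set.getNextRecord();
    //       // process rec.getKey() / rec.getValue()
    //   }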
 326   
 327    /**
 328    * HashFilerRecordSet that does not use a BTree.
 329    */
 330    private class HashFilerRecordSet implements RecordSet {
 331    private List keys = new ArrayList();
 332    private Iterator iter;
 333   
 334  23 public HashFilerRecordSet() {
 335  23 try {
 336  23 long pageNum = 0;
 337   
 338    // Iterate over main hash table...
 339  23 while (pageNum < fileHeader.getPageCount()) {
 340  2944 Page p = getPage(pageNum);
 341  2944 HashPageHeader ph = (HashPageHeader) p.getPageHeader();
 342  2944 if (ph.getStatus() == RECORD) {
 343  4 keys.add(p.getKey());
 344    }
 345   
 346    // ... and over collision chains
 347  2944 while (ph.getNextCollision() != NO_PAGE) {
 348  0 long pn = ph.getNextCollision();
 349  0 p = getPage(pn);
 350  0 ph = (HashPageHeader) p.getPageHeader();
 351  0 if (ph.getStatus() == RECORD) {
 352  0 keys.add(p.getKey());
 353    }
 354    }
 355   
 356  2944 pageNum++;
 357    }
 358   
 359  23 iter = keys.iterator();
 360    } catch (Exception e) {
 361  0 if (log.isWarnEnabled()) {
 362  0 log.warn("ignored exception", e);
 363    }
 364    }
 365    }
 366   
 367  0 public synchronized Key getNextKey() {
 368  0 return (Key) iter.next();
 369    }
 370   
 371  4 public synchronized Record getNextRecord() throws DBException {
 372  4 return readRecord((Key) iter.next(), false);
 373    }
 374   
 375  0 public synchronized Value getNextValue() throws DBException {
 376  0 return getNextRecord().getValue();
 377    }
 378   
 379  26 public synchronized boolean hasMoreRecords() {
 380  26 return iter.hasNext();
 381    }
 382    }
 383   
 384    ////////////////////////////////////////////////////////////////////
 385   
 386  19 public FileHeader createFileHeader() {
 387  19 return new HashFileHeader();
 388    }
 389   
 390  5259 public PageHeader createPageHeader() {
 391  5259 return new HashPageHeader();
 392    }
 393   
 394    /**
 395    * HashFileHeader
 396    */
 397    private final class HashFileHeader extends FileHeader {
 398    private long totalBytes;
 399   
 400  19 public HashFileHeader() {
 401  19 super();
 402    // For hash filer, totalCount >= pageCount. See setConfig().
 403  19 setTotalCount(getPageCount());
 404    }
 405   
 406  19 protected synchronized void read(RandomAccessFile raf) throws IOException {
 407  19 super.read(raf);
 408  19 totalBytes = raf.readLong();
 409    }
 410   
 411  2880 protected synchronized void write(RandomAccessFile raf) throws IOException {
 412  2880 super.write(raf);
 413  2880 raf.writeLong(totalBytes);
 414    }
 415   
 416    /** The total number of bytes in use by the file */
 417  0 public synchronized void setTotalBytes(long totalBytes) {
 418  0 this.totalBytes = totalBytes;
 419  0 setDirty();
 420    }
 421   
 422    /** The total number of bytes in use by the file */
 423  0 public synchronized long getTotalBytes() {
 424  0 return totalBytes;
 425    }
 426   
 427    /** Adjust total number of bytes in use by the file */
 428  6034 public synchronized void addTotalBytes(int count) {
 429  6034 totalBytes += count;
 430    }
 431    }
 432   
 433    /**
 434    * HashPageHeader
 435    */
 436    protected final class HashPageHeader extends PageHeader {
 437    private long created = 0;
 438    private long modified = 0;
 439    private long nextCollision = NO_PAGE;
 440   
 441  5259 public HashPageHeader() {
 442    }
 443   
 444  0 public HashPageHeader(DataInput dis) throws IOException {
 445  0 super(dis);
 446    }
 447   
 448  2556 public synchronized void read(DataInput dis) throws IOException {
 449  2556 super.read(dis);
 450   
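            // For unused pages the metadata fields below are meaningless;
            // keep the in-memory defaults instead of reading them.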
 451  2556 if (getStatus() == UNUSED) {
 452  2556 return;
 453    }
 454   
 455  0 created = dis.readLong();
 456  0 modified = dis.readLong();
 457  0 nextCollision = dis.readLong();
 458    }
 459   
 460  7603 public synchronized void write(DataOutput dos) throws IOException {
 461  7603 super.write(dos);
 462  7601 dos.writeLong(created);
 463  7603 dos.writeLong(modified);
 464  7604 dos.writeLong(nextCollision);
 465    }
 466   
 467  6034 public synchronized void setRecordLen(int recordLen) {
 468  6034 fileHeader.addTotalBytes(recordLen - getRecordLen());
 469  6034 super.setRecordLen(recordLen);
 470    }
 471   
 472    /** UNIX-time when this record was created */
 473  2967 public synchronized void setCreated(long created) {
 474  2964 this.created = created;
 475  2968 setDirty();
 476    }
 477   
 478    /** UNIX-time when this record was created */
 479  4974 public synchronized long getCreated() {
 480  4974 return created;
 481    }
 482   
 483    /** UNIX-time when this record was last modified */
 484  3017 public synchronized void setModified(long modified) {
 485  3017 this.modified = modified;
 486  3017 setDirty();
 487    }
 488   
 489    /** UNIX-time when this record was last modified */
 490  4975 public synchronized long getModified() {
 491  4975 return modified;
 492    }
 493   
 494    /** The next page for a Record collision (if any) */
 495  7154 public synchronized void setNextCollision(long nextCollision) {
 496  7153 this.nextCollision = nextCollision;
 497  7156 setDirty();
 498    }
 499   
 500    /** The next page for a Record collision (if any) */
 501  56425 public synchronized long getNextCollision() {
 502  56425 return nextCollision;
 503    }
 504    }
 505    }