Learning JDK Code – concurrent

CopyOnWriteArrayList
 package java.util.concurrent;  
 public class CopyOnWriteArrayList<E> implements List<E>, RandomAccess, Cloneable, java.io.Serializable {  
   /** The lock protecting all mutators */  
   transient final ReentrantLock lock = new ReentrantLock();  
   private volatile transient Object[] array;  
    /**
     * <p>The returned iterator provides a snapshot of the state of the list
     * when the iterator was constructed. No synchronization is needed while
     * traversing the iterator. The iterator does <em>NOT</em> support the
     * <tt>remove</tt> method.
     */
   public Iterator<E> iterator() {  
     return new COWIterator<E>(getArray(), 0);  
   }    
   public E set(int index, E element) {  
      final ReentrantLock lock = this.lock;  
      lock.lock();  
      try {  
        Object[] elements = getArray();  
        Object oldValue = elements[index];  
        if (oldValue != element) {  
           int len = elements.length;  
     // create a new array  
           Object[] newElements = Arrays.copyOf(elements, len);  
           newElements[index] = element;  
           setArray(newElements);  
        } else {  
           // Not quite a no-op; ensures volatile write semantics  
           setArray(elements);  
        }  
        return (E)oldValue;  
      } finally {  
        lock.unlock();  
      }  
   }    
   public boolean add(E e) {  
      final ReentrantLock lock = this.lock;  
      lock.lock();  
      try {  
        Object[] elements = getArray();  
        int len = elements.length;  
        Object[] newElements = Arrays.copyOf(elements, len + 1);  
        newElements[len] = e;  
        setArray(newElements);  
        return true;  
      } finally {  
        lock.unlock();  
      }  
   }    
   public boolean remove(Object o) {  
      final ReentrantLock lock = this.lock;  
      lock.lock();  
      try {  
        Object[] elements = getArray();  
        int len = elements.length;  
        if (len != 0) {  
           // Copy while searching for element to remove  
           // This wins in the normal case of element being present  
           int newlen = len - 1;  
           Object[] newElements = new Object[newlen];  
           for (int i = 0; i < newlen; ++i) {  
             if (eq(o, elements[i])) {  
                // found one; copy remaining and exit  
                for (int k = i + 1; k < len; ++k)  
                  newElements[k-1] = elements[k];  
                setArray(newElements);  
                return true;  
             } else  
                newElements[i] = elements[i];  
           }  
           // special handling for last cell  
           if (eq(o, elements[newlen])) {  
             setArray(newElements);  
             return true;  
           }  
        }  
        return false;  
      } finally {  
        lock.unlock();  
      }  
   }  
   public E remove(int index) {  
      final ReentrantLock lock = this.lock;  
      lock.lock();  
      try {  
        Object[] elements = getArray();  
        int len = elements.length;  
        Object oldValue = elements[index];  
        int numMoved = len - index - 1;  
        if (numMoved == 0)  
           setArray(Arrays.copyOf(elements, len - 1));  
        else {  
           Object[] newElements = new Object[len - 1];  
           System.arraycopy(elements, 0, newElements, 0, index);  
           System.arraycopy(elements, index + 1, newElements, index,  
                      numMoved);  
           setArray(newElements);  
        }  
        return (E)oldValue;  
      } finally {  
        lock.unlock();  
      }  
   }    
   private static class COWIterator<E> implements ListIterator<E> {  
     /** Snapshot of the array **/  
     private final Object[] snapshot;  
     /** Index of element to be returned by subsequent call to next. */  
     private int cursor;  
     private COWIterator(Object[] elements, int initialCursor) {  
       cursor = initialCursor;  
       snapshot = elements;  
     }  
     public boolean hasNext() {  
       return cursor < snapshot.length;  
     }  
     public boolean hasPrevious() {  
       return cursor > 0;  
     }  
     public E next() {  
        if (! hasNext())  
         throw new NoSuchElementException();  
        return (E) snapshot[cursor++];  
     }  
     public E previous() {  
        if (! hasPrevious())  
         throw new NoSuchElementException();  
        return (E) snapshot[--cursor];  
     }  
     public int nextIndex() {  
       return cursor;  
     }  
     public int previousIndex() {  
       return cursor-1;  
     }  
      // Mutating operations are not supported on the snapshot iterator.
      public void remove() { throw new UnsupportedOperationException(); }
      public void set(E e) { throw new UnsupportedOperationException(); }
      public void add(E e) { throw new UnsupportedOperationException(); }
   }  
 }    
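A minimal usage sketch of the snapshot semantics above (the demo class name and values are mine, not from the JDK): an iterator created before a mutation never observes it, and its remove() throws UnsupportedOperationException.
 import java.util.Iterator;
 import java.util.concurrent.CopyOnWriteArrayList;

 public class CowListDemo {
   public static void main(String[] args) {
     CopyOnWriteArrayList<String> list = new CopyOnWriteArrayList<String>();
     list.add("a");
     list.add("b");

     Iterator<String> it = list.iterator(); // snapshot of [a, b]
     list.add("c");                         // mutators copy the array; the snapshot is unaffected

     while (it.hasNext())
       System.out.println(it.next());       // prints a, b only
     // it.remove() would throw UnsupportedOperationException

     System.out.println(list);              // [a, b, c]
   }
 }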
ArrayBlockingQueue
 public class ArrayBlockingQueue<E> extends AbstractQueue<E>
   implements BlockingQueue<E>, java.io.Serializable {
   private final E[] items;  
   private int takeIndex;
   private int putIndex;
   /** Number of elements in the queue */
   private int count;
   /** Main lock guarding all access */  
   private final ReentrantLock lock;  
   /** Condition for waiting takes */  
   private final Condition notEmpty;  
   /** Condition for waiting puts */  
   private final Condition notFull;  
   // Blocked operations  
    * Inserts the specified element at the tail of this queue, waiting  
    * for space to become available if the queue is full.  
   public void put(E e) throws InterruptedException {  
     if (e == null) throw new NullPointerException();  
     final E[] items = this.items;  
     final ReentrantLock lock = this.lock;  
     lock.lockInterruptibly();  
     try {  
       try {  
         while (count == items.length)  
           notFull.await();  
       } catch (InterruptedException ie) {  
         notFull.signal(); // propagate to non-interrupted thread  
         throw ie;  
       }  
       insert(e);  
     } finally {  
       lock.unlock();  
     }  
   }    
    /**
     * Retrieves and removes the head of this queue, waiting
     * if necessary until an element becomes available.
     */
   public E take() throws InterruptedException {  
     final ReentrantLock lock = this.lock;  
     lock.lockInterruptibly();  
     try {  
       try {  
         while (count == 0)  
           notEmpty.await();  
       } catch (InterruptedException ie) {  
         notEmpty.signal(); // propagate to non-interrupted thread  
         throw ie;  
       }  
       E x = extract();  
       return x;  
     } finally {  
       lock.unlock();  
     }  
   }  
   public E poll(long timeout, TimeUnit unit) throws InterruptedException {  
      long nanos = unit.toNanos(timeout);  
     final ReentrantLock lock = this.lock;  
     lock.lockInterruptibly();  
     try {  
       for (;;) {  
         if (count != 0) {  
           E x = extract();  
           return x;  
         }  
         if (nanos <= 0)  
           return null;  
         try {  
           nanos = notEmpty.awaitNanos(nanos);  
         } catch (InterruptedException ie) {  
           notEmpty.signal(); // propagate to non-interrupted thread  
           throw ie;  
         }  
       }  
     } finally {  
       lock.unlock();  
     }  
   }    
   public boolean offer(E e, long timeout, TimeUnit unit)  
     throws InterruptedException {  
     if (e == null) throw new NullPointerException();  
      long nanos = unit.toNanos(timeout);  
     final ReentrantLock lock = this.lock;  
     lock.lockInterruptibly();  
     try {  
       for (;;) {  
         if (count != items.length) {  
           insert(e);  
           return true;  
         }  
         if (nanos <= 0)  
           return false;  
         try {  
           nanos = notFull.awaitNanos(nanos);  
         } catch (InterruptedException ie) {  
           notFull.signal(); // propagate to non-interrupted thread  
           throw ie;  
         }  
       }  
     } finally {  
       lock.unlock();  
     }  
   }  
   public boolean contains(Object o) {  
     if (o == null) return false;  
     final E[] items = this.items;  
     final ReentrantLock lock = this.lock;  
     lock.lock();  
     try {  
       int i = takeIndex;  
       int k = 0;  
       while (k++ < count) {  
         if (o.equals(items[i]))  
           return true;  
         i = inc(i);  
       }  
       return false;  
     } finally {  
       lock.unlock();  
     }  
   }    
    * Inserts element at current put position, advances, and signals.  
    * Call only when holding lock.  
   private void insert(E x) {  
     items[putIndex] = x;  
     putIndex = inc(putIndex);  
     ++count;  
     notEmpty.signal();  
   }  
    * Extracts element at current take position, advances, and signals.  
    * Call only when holding lock.  
   private E extract() {  
     final E[] items = this.items;  
     E x = items[takeIndex];  
     items[takeIndex] = null;  
     takeIndex = inc(takeIndex);  
     --count;  
     notFull.signal();  
     return x;  
   }    
   final int inc(int i) {  
     return (++i == items.length)? 0 : i;  
 }  
   /**  
    * Returns an iterator over the elements in this queue in proper sequence.  
    * The returned Iterator is a "weakly consistent" iterator that  
    * will never throw {@link ConcurrentModificationException},  
    * and guarantees to traverse elements as they existed upon  
    * construction of the iterator, and may (but is not guaranteed to)  
     * reflect any modifications subsequent to construction.
     */
   public Iterator<E> iterator() {  
     final ReentrantLock lock = this.lock;  
     lock.lock();  
     try {  
       return new Itr();  
     } finally {  
       lock.unlock();  
     }  
 }  
   private class Itr implements Iterator<E> {  
      * Index of element to be returned by next,  
      * or a negative number if no such.  
     private int nextIndex;  
      * nextItem holds on to item fields because once we claim  
      * that an element exists in hasNext(), we must return it in  
      * the following next() call even if it was in the process of  
      * being removed when hasNext() was called.  
     private E nextItem;  
      * Index of element returned by most recent call to next.  
      * Reset to -1 if this element is deleted by a call to remove.  
     private int lastRet;  
     Itr() {  
       lastRet = -1;  
       if (count == 0)  
         nextIndex = -1;  
       else {  
         nextIndex = takeIndex;  
         nextItem = items[takeIndex];  
       }  
     }  
     public boolean hasNext() {  
        * No sync. We can return true by mistake here  
        * only if this iterator passed across threads,  
        * which we don't support anyway.  
       return nextIndex >= 0;  
     }  
      * Checks whether nextIndex is valid; if so setting nextItem.  
      * Stops iterator when either hits putIndex or sees null item.  
     private void checkNext() {  
       if (nextIndex == putIndex) {  
         nextIndex = -1;  
         nextItem = null;  
       } else {  
         nextItem = items[nextIndex];  
         if (nextItem == null)  
           nextIndex = -1;  
       }  
     }  
     public E next() {  
       final ReentrantLock lock = ArrayBlockingQueue.this.lock;  
       lock.lock();  
       try {  
         if (nextIndex < 0)  
           throw new NoSuchElementException();  
         lastRet = nextIndex;  
         E x = nextItem;  
         nextIndex = inc(nextIndex);  
         checkNext();  
         return x;  
       } finally {  
         lock.unlock();  
       }  
     }  
     public void remove() {  
       final ReentrantLock lock = ArrayBlockingQueue.this.lock;  
       lock.lock();  
       try {  
         int i = lastRet;  
         if (i == -1)  
           throw new IllegalStateException();  
         lastRet = -1;  
         int ti = takeIndex;  
         removeAt(i);  
         // back up cursor (reset to front if was first element)  
         nextIndex = (i == ti) ? takeIndex : i;  
         checkNext();  
       } finally {  
         lock.unlock();  
       }  
     }  
   }  
 }  
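A small producer/consumer sketch against the blocking put/take paths above (class name, capacity, and values are mine): put blocks on notFull when the array is full, take blocks on notEmpty when it is empty.
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;

 public class BoundedBufferDemo {
   public static void main(String[] args) throws InterruptedException {
     final BlockingQueue<Integer> queue = new ArrayBlockingQueue<Integer>(2); // capacity 2

     Thread producer = new Thread(new Runnable() {
       public void run() {
         try {
           for (int i = 0; i < 5; i++) {
             queue.put(i);                  // blocks while the queue is full
             System.out.println("put " + i);
           }
         } catch (InterruptedException e) {
           Thread.currentThread().interrupt();
         }
       }
     });
     Thread consumer = new Thread(new Runnable() {
       public void run() {
         try {
           for (int i = 0; i < 5; i++)
             System.out.println("took " + queue.take()); // blocks while the queue is empty
         } catch (InterruptedException e) {
           Thread.currentThread().interrupt();
         }
       }
     });
     producer.start();
     consumer.start();
     producer.join();
     consumer.join();
   }
 }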
LinkedBlockingQueue
 public class LinkedBlockingQueue<E> extends AbstractQueue<E>  
     implements BlockingQueue<E>, java.io.Serializable {  
   static class Node<E> {  
     E item;  
      * One of:  
      * - the real successor Node  
      * - this Node, meaning the successor is head.next  
      * - null, meaning there is no successor (this is the last node)  
     Node<E> next;  
     Node(E x) { item = x; }  
   }  
   /** The capacity bound, or Integer.MAX_VALUE if none */
   private final int capacity;
   /** Current number of elements */
   private final AtomicInteger count = new AtomicInteger(0);
   private transient Node<E> head;  
   private transient Node<E> last;  
   /** Lock held by take, poll, etc */  
   private final ReentrantLock takeLock = new ReentrantLock();  
   /** Wait queue for waiting takes */  
   private final Condition notEmpty = takeLock.newCondition();  
   /** Lock held by put, offer, etc */  
   private final ReentrantLock putLock = new ReentrantLock();  
   /** Wait queue for waiting puts */  
   private final Condition notFull = putLock.newCondition();   
   public LinkedBlockingQueue(int capacity) {  
     if (capacity <= 0) throw new IllegalArgumentException();  
     this.capacity = capacity;  
     last = head = new Node<E>(null);  
   }    
   public LinkedBlockingQueue(Collection<? extends E> c) {  
     this(Integer.MAX_VALUE);  
     final ReentrantLock putLock = this.putLock;  
     putLock.lock(); // Never contended, but necessary for visibility  
     try {  
       int n = 0;  
       for (E e : c) {  
         if (e == null)  
           throw new NullPointerException();  
         if (n == capacity)  
           throw new IllegalStateException("Queue full");  
         enqueue(e);  
         ++n;  
       }  
       count.set(n);  
     } finally {  
       putLock.unlock();  
     }  
   }    
   public E take() throws InterruptedException {  
     E x;  
     int c = -1;  
     final AtomicInteger count = this.count;  
     final ReentrantLock takeLock = this.takeLock;  
     takeLock.lockInterruptibly();  
     try {  
         while (count.get() == 0) {  
           notEmpty.await();  
         }  
       x = dequeue();  
       c = count.getAndDecrement();  
       if (c > 1)  
         notEmpty.signal();  
     } finally {  
       takeLock.unlock();  
     }  
     if (c == capacity)  
       signalNotFull();  
     return x;  
   }  
    * Inserts the specified element at the tail of this queue, waiting if  
    * necessary for space to become available.    
   public void put(E e) throws InterruptedException {  
     if (e == null) throw new NullPointerException();  
     // Note: convention in all put/take/etc is to preset local var  
     // holding count negative to indicate failure unless set.  
     int c = -1;  
     final ReentrantLock putLock = this.putLock;  
     final AtomicInteger count = this.count;  
     putLock.lockInterruptibly();  
     try {  
       /*  
        * Note that count is used in wait guard even though it is  
        * not protected by lock. This works because count can  
        * only decrease at this point (all other puts are shut  
        * out by lock), and we (or some other waiting put) are  
        * signalled if it ever changes from  
        * capacity. Similarly for all other uses of count in  
        * other wait guards.  
        */  
       while (count.get() == capacity) {   
           notFull.await();  
       }  
       enqueue(e);  
       c = count.getAndIncrement();  
       if (c + 1 < capacity)  
         notFull.signal();  
     } finally {  
       putLock.unlock();  
     }  
     if (c == 0)  
       signalNotEmpty();  
   }  
   public boolean offer(E e, long timeout, TimeUnit unit)  
     throws InterruptedException {  
     if (e == null) throw new NullPointerException();  
     long nanos = unit.toNanos(timeout);  
     int c = -1;  
     final ReentrantLock putLock = this.putLock;  
     final AtomicInteger count = this.count;  
     putLock.lockInterruptibly();  
     try {  
       while (count.get() == capacity) {  
         if (nanos <= 0)  
           return false;  
         nanos = notFull.awaitNanos(nanos);  
       }  
       enqueue(e);  
       c = count.getAndIncrement();  
       if (c + 1 < capacity)  
         notFull.signal();  
     } finally {  
       putLock.unlock();  
     }  
     if (c == 0)  
       signalNotEmpty();  
     return true;  
   }  
   public E poll(long timeout, TimeUnit unit) throws InterruptedException {  
     E x = null;  
     int c = -1;  
     long nanos = unit.toNanos(timeout);  
     final AtomicInteger count = this.count;  
     final ReentrantLock takeLock = this.takeLock;  
     takeLock.lockInterruptibly();  
     try {  
         while (count.get() == 0) {   
          if (nanos <= 0)  
           return null;  
          nanos = notEmpty.awaitNanos(nanos);  
         }  
         x = dequeue();  
         c = count.getAndDecrement();  
         if (c > 1)  
           notEmpty.signal();  
     } finally {  
       takeLock.unlock();  
     }  
     if (c == capacity)  
       signalNotFull();  
     return x;  
   }  
   private void enqueue(E x) {  
     // assert putLock.isHeldByCurrentThread();  
     last = last.next = new Node<E>(x);  
   }  
   private E dequeue() {  
     // assert takeLock.isHeldByCurrentThread();  
     Node<E> h = head;  
     Node<E> first = h.next;  
     h.next = h; // help GC  
     head = first;  
     E x = first.item;  
     first.item = null;  
     return x;  
   }    
   public Iterator<E> iterator() {  
    return new Itr();  
   }  
   private class Itr implements Iterator<E> {  
     /*  
      * Basic weakly-consistent iterator. At all times hold the next  
      * item to hand out so that if hasNext() reports true, we will  
      * still have it to return even if lost race with a take etc.  
      */  
     private Node<E> current;  
     private Node<E> lastRet;  
     private E currentElement;  
     Itr() {  
       fullyLock();  
       try {  
         current = head.next;  
         if (current != null)  
           currentElement = current.item;  
       } finally {  
         fullyUnlock();  
       }  
     }  
     public boolean hasNext() {  
       return current != null;  
     }  
     public E next() {  
       fullyLock();  
       try {  
         if (current == null)  
           throw new NoSuchElementException();  
         E x = currentElement;  
         lastRet = current;  
         current = nextNode(current);  
         currentElement = (current == null) ? null : current.item;  
         return x;  
       } finally {  
         fullyUnlock();  
       }  
     }  
     public void remove() {  
       if (lastRet == null)  
         throw new IllegalStateException();  
       fullyLock();  
       try {  
         Node<E> node = lastRet;  
         lastRet = null;  
         for (Node<E> trail = head, p = trail.next;  
            p != null;  
            trail = p, p = p.next) {  
            if (p == node) {  
              unlink(p, trail);  
              break;  
            }  
         }  
       } finally {  
         fullyUnlock();  
       }  
     }  
   }  
   public boolean remove(Object o) {  
     if (o == null) return false;  
     fullyLock();  
     try {  
       for (Node<E> trail = head, p = trail.next;  
          p != null;  
          trail = p, p = p.next) {  
         if (o.equals(p.item)) {  
           unlink(p, trail);  
           return true;  
         }  
       }  
       return false;  
     } finally {  
       fullyUnlock();  
     }  
 }  
   public void clear() {  
     fullyLock();  
     try {  
       for (Node<E> p, h = head; (p = h.next) != null; h = p) {  
         h.next = h;  
         p.item = null;  
       }  
       head = last;  
       // assert head.item == null && head.next == null;  
       if (count.getAndSet(0) == capacity)  
         notFull.signal();  
     } finally {  
       fullyUnlock();  
     }  
   }  
   boolean isFullyLocked() {  
     return (putLock.isHeldByCurrentThread() &&  
         takeLock.isHeldByCurrentThread());  
   }  
 }  
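A short sketch of the timed offer/poll paths above (class name, capacity, and timeouts are mine). Because put/offer only contend on putLock and take/poll only on takeLock, a producer and a consumer never block each other on the lock itself; they only wait when the queue is actually full or empty.
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;

 public class TwoLockQueueDemo {
   public static void main(String[] args) throws InterruptedException {
     // Bounded to 1 so the timed offer below actually has to wait.
     LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<String>(1);

     queue.put("first");                                      // takes putLock only
     boolean accepted = queue.offer("second", 100, TimeUnit.MILLISECONDS);
     System.out.println("second accepted: " + accepted);      // false: the queue stayed full

     String head = queue.poll(100, TimeUnit.MILLISECONDS);    // takes takeLock only
     System.out.println("took: " + head);                     // first
   }
 }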
PriorityQueue
Key points: builds a binary min-heap; note the heapify and siftDown operations, and the use of bitwise shift operators to speed up the parent/child index arithmetic.
 public class PriorityQueue<E> extends AbstractQueue<E>  
 implements java.io.Serializable {  
    * Priority queue represented as a balanced binary heap: the two  
    * children of queue[n] are queue[2*n+1] and queue[2*(n+1)]. The  
    * priority queue is ordered by comparator, or by the elements'  
    * natural ordering, if comparator is null: For each node n in the  
    * heap and each descendant d of n, n <= d. The element with the  
    * lowest value is in queue[0], assuming the queue is nonempty.  
 private transient Object[] queue;  
    * The number of elements in the priority queue.  
   private int size = 0;  
    * The comparator, or null if priority queue uses elements'  
    * natural ordering.  
   private final Comparator<? super E> comparator;  
    * The number of times this priority queue has been  
    * <i>structurally modified</i>. See AbstractList for gory details.  
  private transient int modCount = 0;  
 private void initFromCollection(Collection<? extends E> c) {  
     Object[] a = c.toArray();  
     // If c.toArray incorrectly doesn't return Object[], copy it.  
     if (a.getClass() != Object[].class)  
       a = Arrays.copyOf(a, a.length, Object[].class);  
     queue = a;  
     size = a.length;  
 }  
  // Copy constructor: provides a more type-specific API to the client.
   public PriorityQueue(PriorityQueue<? extends E> c) {  
     comparator = (Comparator<? super E>)c.comparator();  
     initFromCollection(c);  
   }  
   public PriorityQueue(Collection<? extends E> c) {  
     initFromCollection(c);  
     if (c instanceof SortedSet)  
       comparator = (Comparator<? super E>)  
         ((SortedSet<? extends E>)c).comparator();  
     else if (c instanceof PriorityQueue)  
       comparator = (Comparator<? super E>)  
         ((PriorityQueue<? extends E>)c).comparator();  
     else {  
       comparator = null;  
       heapify();  
     }  
 }  
   private void heapify() {  
     for (int i = (size >>> 1) - 1; i >= 0; i--)  
       siftDown(i, (E) queue[i]);  
   }  
    * Inserts item x at position k, maintaining heap invariant by  
    * demoting x down the tree repeatedly until it is less than or  
    * equal to its children or is a leaf.  
   private void siftDown(int k, E x) {  
     if (comparator != null)  
       siftDownUsingComparator(k, x);  
     else  
       siftDownComparable(k, x);  
   }  
   private void siftDownComparable(int k, E x) {  
     Comparable<? super E> key = (Comparable<? super E>)x;  
     int half = size >>> 1;    // loop while a non-leaf  
     while (k < half) {  
       int child = (k << 1) + 1; // assume left child is least  
       Object c = queue[child];  
       int right = child + 1;  
       if (right < size &&  
         ((Comparable<? super E>) c).compareTo((E) queue[right]) > 0)  
         c = queue[child = right];  
       if (key.compareTo((E) c) <= 0)  
         break;  
       queue[k] = c;  
       k = child;  
     }  
     queue[k] = key;  
   }  
   private void siftDownUsingComparator(int k, E x) {  
     int half = size >>> 1;  
     while (k < half) {  
       int child = (k << 1) + 1;  
       Object c = queue[child];  
       int right = child + 1;  
       if (right < size &&  
         comparator.compare((E) c, (E) queue[right]) > 0)  
         c = queue[child = right];  
       if (comparator.compare(x, (E) c) <= 0)  
         break;  
       queue[k] = c;  
       k = child;  
     }  
     queue[k] = x;  
 }  
  // Be careful to check for overflow when doing arithmetic on array capacities.
   private void grow(int minCapacity) {  
     if (minCapacity < 0) // overflow  
       throw new OutOfMemoryError();  
      int oldCapacity = queue.length;  
     // Double size if small; else grow by 50%  
     int newCapacity = ((oldCapacity < 64)?  
               ((oldCapacity + 1) * 2):  
               ((oldCapacity / 2) * 3));  
     if (newCapacity < 0) // overflow  
       newCapacity = Integer.MAX_VALUE;  
     if (newCapacity < minCapacity)  
       newCapacity = minCapacity;  
     queue = Arrays.copyOf(queue, newCapacity);  
 }  
  // Throws NullPointerException explicitly, and documents the NPE in the Javadoc.
    * Inserts the specified element into this priority queue.  
    * @throws ClassCastException if the specified element cannot be  
    *     compared with elements currently in this priority queue  
    *     according to the priority queue's ordering  
    * @throws NullPointerException if the specified element is null  
    */  
   public boolean offer(E e) {  
     if (e == null)  
       throw new NullPointerException();  
     modCount++;  
     int i = size;  
     if (i >= queue.length)  
       grow(i + 1);  
     size = i + 1;  
     if (i == 0)  
       queue[0] = e;  
     else  
       siftUp(i, e);  
     return true;  
   }  
     * To simplify and speed up coercions and comparisons, the
    * Comparable and Comparator versions are separated into different  
    * methods that are otherwise identical. (Similarly for siftDown.)  
   private void siftUp(int k, E x) {  
     if (comparator != null)  
       siftUpUsingComparator(k, x);  
     else  
       siftUpComparable(k, x);  
   }  
   private void siftUpComparable(int k, E x) {  
     Comparable<? super E> key = (Comparable<? super E>) x;  
     while (k > 0) {  
       int parent = (k - 1) >>> 1;  
       Object e = queue[parent];  
       if (key.compareTo((E) e) >= 0)  
         break;  
       queue[k] = e;  
       k = parent;  
     }  
     queue[k] = key;  
   }  
   private void siftUpUsingComparator(int k, E x) {  
     while (k > 0) {  
       int parent = (k - 1) >>> 1;  
       Object e = queue[parent];  
       if (comparator.compare(x, (E) e) >= 0)  
         break;  
       queue[k] = e;  
       k = parent;  
     }  
     queue[k] = x;  
 }  
   public E poll() {  
     if (size == 0)  
       return null;  
     int s = --size;  
     modCount++;  
     E result = (E) queue[0];  
     E x = (E) queue[s];  
     queue[s] = null;  
     if (s != 0)  
       siftDown(0, x);  
     return result;  
   }  
   public boolean remove(Object o) {  
      int i = indexOf(o);  
      if (i == -1)  
        return false;  
      else {  
        removeAt(i);  
        return true;  
      }  
 }  
    * Normally this method leaves the elements at up to i-1,  
    * inclusive, untouched. Under these circumstances, it returns  
    * null. Occasionally, in order to maintain the heap invariant,  
    * it must swap a later element of the list with one earlier than  
    * i. Under these circumstances, this method returns the element  
    * that was previously at the end of the list and is now at some  
    * position before i. This fact is used by iterator.remove so as to  
    * avoid missing traversing elements.  
   private E removeAt(int i) {  
     assert i >= 0 && i < size;  
     modCount++;  
     int s = --size;  
     if (s == i) // removed last element  
       queue[i] = null;  
     else {  
       E moved = (E) queue[s];  
       queue[s] = null;  
       siftDown(i, moved);  
       if (queue[i] == moved) {  
         siftUp(i, moved);  
         if (queue[i] != moved)  
           return moved;  
       }  
     }  
     return null;  
   }  
 *   String[] y = x.toArray(new String[0]);  
 * @throws NullPointerException if the specified array is null  
   public <T> T[] toArray(T[] a) {  
     if (a.length < size)  
       // Make a new array of a's runtime type, but my contents:  
       return (T[]) Arrays.copyOf(queue, size, a.getClass());  
      System.arraycopy(queue, 0, a, 0, size);  
     if (a.length > size)  
       a[size] = null;  
     return a;  
   }  
   public Iterator<E> iterator() {  
     return new Itr();  
   }  
   private final class Itr implements Iterator<E> {  
      * Index (into queue array) of element to be returned by  
      * subsequent call to next.  
     private int cursor = 0;  
      * Index of element returned by most recent call to next,  
      * unless that element came from the forgetMeNot list.  
      * Set to -1 if element is deleted by a call to remove.  
     private int lastRet = -1;  
      * A queue of elements that were moved from the unvisited portion of  
      * the heap into the visited portion as a result of "unlucky" element  
      * removals during the iteration. (Unlucky element removals are those  
      * that require a siftup instead of a siftdown.) We must visit all of  
      * the elements in this list to complete the iteration. We do this  
      * after we've completed the "normal" iteration.  
      *  
      * We expect that most iterations, even those involving removals,  
      * will not need to store elements in this field.  
     private ArrayDeque<E> forgetMeNot = null;  
      * Element returned by the most recent call to next iff that  
      * element was drawn from the forgetMeNot list.  
     private E lastRetElt = null;  
      * The modCount value that the iterator believes that the backing  
      * Queue should have. If this expectation is violated, the iterator  
      * has detected concurrent modification.  
     private int expectedModCount = modCount;  
     public boolean hasNext() {  
       return cursor < size ||  
         (forgetMeNot != null && !forgetMeNot.isEmpty());  
     }  
     public E next() {  
       if (expectedModCount != modCount)  
         throw new ConcurrentModificationException();  
       if (cursor < size)  
         return (E) queue[lastRet = cursor++];  
       if (forgetMeNot != null) {  
         lastRet = -1;  
         lastRetElt = forgetMeNot.poll();  
         if (lastRetElt != null)  
           return lastRetElt;  
       }  
       throw new NoSuchElementException();  
     }  
     public void remove() {  
       if (expectedModCount != modCount)  
         throw new ConcurrentModificationException();  
       if (lastRet != -1) {  
         E moved = PriorityQueue.this.removeAt(lastRet);  
         lastRet = -1;  
         if (moved == null)  
           cursor--;  
         else {  
           if (forgetMeNot == null)  
             forgetMeNot = new ArrayDeque<E>();  
           forgetMeNot.add(moved);  
         }  
       } else if (lastRetElt != null) {  
         PriorityQueue.this.removeEq(lastRetElt);  
         lastRetElt = null;  
       } else {  
         throw new IllegalStateException();  
        }  
       expectedModCount = modCount;  
     }  
 }  
   private void writeObject(java.io.ObjectOutputStream s)  
     throws java.io.IOException{  
     // Write out element count, and any hidden stuff  
     s.defaultWriteObject();  
     // Write out array length, for compatibility with 1.5 version  
     s.writeInt(Math.max(2, size + 1));  
     // Write out all elements in the "proper order".  
     for (int i = 0; i < size; i++)  
       s.writeObject(queue[i]);  
 }  
   private void readObject(java.io.ObjectInputStream s)  
     throws java.io.IOException, ClassNotFoundException {  
     // Read in size, and any hidden stuff  
     s.defaultReadObject();  
     // Read in (and discard) array length  
     s.readInt();  
      queue = new Object[size];  
     // Read in all elements.  
     for (int i = 0; i < size; i++)  
       queue[i] = s.readObject();  
      // Elements are guaranteed to be in "proper order", but the  
      // spec has never explained what that might be.  
      heapify();  
 }  
 }  
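A small sketch of the index arithmetic and min-heap ordering described above (class name and values are mine): the parent of index k is (k - 1) >>> 1, its left child is (k << 1) + 1, and poll always returns the least element.
 import java.util.PriorityQueue;

 public class MinHeapDemo {
   public static void main(String[] args) {
     PriorityQueue<Integer> pq = new PriorityQueue<Integer>();
     for (int n : new int[] {5, 1, 4, 2, 3})
       pq.offer(n);                          // each offer sifts the new element up

     int k = 4;
     int parent = (k - 1) >>> 1;             // 1
     int leftChild = (k << 1) + 1;           // 9
     System.out.println("parent of 4 = " + parent + ", left child of 4 = " + leftChild);

     while (!pq.isEmpty())
       System.out.print(pq.poll() + " ");    // each poll sifts the last element down: 1 2 3 4 5
   }
 }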
DelayQueue
It uses a PriorityQueue internally.
 package java.util.concurrent;  
 * An unbounded {@linkplain BlockingQueue blocking queue} of  
  * <tt>Delayed</tt> elements, in which an element can only be taken  
  * when its delay has expired. The <em>head</em> of the queue is that  
  * <tt>Delayed</tt> element whose delay expired furthest in the  
  * past. If no delay has expired there is no head and <tt>poll</tt>  
  * will return <tt>null</tt>. Expiration occurs when an element's  
  * <tt>getDelay(TimeUnit.NANOSECONDS)</tt> method returns a value less  
  * than or equal to zero. Even though unexpired elements cannot be  
  * removed using <tt>take</tt> or <tt>poll</tt>, they are otherwise  
  * treated as normal elements. For example, the <tt>size</tt> method  
  * returns the count of both expired and unexpired elements.  
  * This queue does not permit null elements.   
 public class DelayQueue<E extends Delayed> extends AbstractQueue<E>  
   implements BlockingQueue<E> {  
   private transient final ReentrantLock lock = new ReentrantLock();  
   private transient final Condition available = lock.newCondition();  
   private final PriorityQueue<E> q = new PriorityQueue<E>();  
   public boolean offer(E e) {  
     final ReentrantLock lock = this.lock;  
     lock.lock();  
     try {  
       E first = q.peek();  
       q.offer(e);  
       if (first == null || e.compareTo(first) < 0)  
         available.signalAll();  
       return true;  
     } finally {  
       lock.unlock();  
     }  
 }  
    * Retrieves and removes the head of this queue, or returns <tt>null</tt>  
    * if this queue has no elements with an expired delay.  
    * @return the head of this queue, or <tt>null</tt> if this  
    *     queue has no elements with an expired delay  
   public E poll() {  
     final ReentrantLock lock = this.lock;  
     lock.lock();  
     try {  
       E first = q.peek();  
       if (first == null || first.getDelay(TimeUnit.NANOSECONDS) > 0)  
         return null;  
       else {  
         E x = q.poll();  
         assert x != null;  
         if (q.size() != 0)  
           available.signalAll();  
         return x;  
       }  
     } finally {  
       lock.unlock();  
     }  
   }  
    * Retrieves and removes the head of this queue, waiting if necessary  
    * until an element with an expired delay is available on this queue,  
    * or the specified wait time expires.  
   public E poll(long timeout, TimeUnit unit) throws InterruptedException {  
     long nanos = unit.toNanos(timeout);  
     final ReentrantLock lock = this.lock;  
     lock.lockInterruptibly();  
     try {  
       for (;;) {  
         E first = q.peek();  
         if (first == null) {  
           if (nanos <= 0)  
             return null;  
           else  
             nanos = available.awaitNanos(nanos);  
         } else {  
           long delay = first.getDelay(TimeUnit.NANOSECONDS);  
           if (delay > 0) {  
             if (nanos <= 0)  
               return null;  
             if (delay > nanos)  
               delay = nanos;  
             long timeLeft = available.awaitNanos(delay);  
             nanos -= delay - timeLeft;  
           } else {  
             E x = q.poll();  
             assert x != null;  
             if (q.size() != 0)  
               available.signalAll();  
             return x;  
           }  
         }  
       }  
     } finally {  
       lock.unlock();  
     }  
   }  
    * Retrieves and removes the head of this queue, waiting if necessary  
    * until an element with an expired delay is available on this queue.  
   public E take() throws InterruptedException {  
     final ReentrantLock lock = this.lock;  
     lock.lockInterruptibly();  
     try {  
       for (;;) {  
         E first = q.peek();  
         if (first == null) {  
           available.await();  
         } else {  
           long delay = first.getDelay(TimeUnit.NANOSECONDS);  
           if (delay > 0) {  
             long tl = available.awaitNanos(delay);  
           } else {  
             E x = q.poll();  
             assert x != null;  
             if (q.size() != 0)  
               available.signalAll(); // wake up other takers  
             return x;  
           }  
         }  
       }  
     } finally {  
       lock.unlock();  
     }  
   }  
    * Returns an iterator over all the elements (both expired and  
    * unexpired) in this queue. The iterator does not return the  
    * elements in any particular order. The returned  
    * <tt>Iterator</tt> is a "weakly consistent" iterator that will  
    * never throw {@link ConcurrentModificationException}, and  
    * guarantees to traverse elements as they existed upon  
    * construction of the iterator, and may (but is not guaranteed  
    * to) reflect any modifications subsequent to construction.  
   public Iterator<E> iterator() {  
     return new Itr(toArray());  
   }  
    * Snapshot iterator that works off copy of underlying q array.  
   private class Itr implements Iterator<E> {  
     final Object[] array; // Array of all elements  
      int cursor;      // index of next element to return;  
      int lastRet;     // index of last element, or -1 if no such  
     Itr(Object[] array) {  
       lastRet = -1;  
       this.array = array;  
     }  
     public boolean hasNext() {  
       return cursor < array.length;  
     }  
     public E next() {  
       if (cursor >= array.length)  
         throw new NoSuchElementException();  
       lastRet = cursor;  
       return (E)array[cursor++];  
     }  
     public void remove() {  
       if (lastRet < 0)  
           throw new IllegalStateException();  
       Object x = array[lastRet];  
       lastRet = -1;  
       // Traverse underlying queue to find == element,  
       // not just a .equals element.  
       lock.lock();  
       try {  
         for (Iterator it = q.iterator(); it.hasNext(); ) {  
           if (it.next() == x) {  
             it.remove();  
             return;  
           }  
         }  
       } finally {  
         lock.unlock();  
       }  
     }  
 }  
 }  
 * A mix-in style interface for marking objects that should be  
  * acted upon after a given delay.  
 * <p>An implementation of this interface must define a  
  * <tt>compareTo</tt> method that provides an ordering consistent with  
  * its <tt>getDelay</tt> method.  
 public interface Delayed extends Comparable<Delayed> {  
   long getDelay(TimeUnit unit);  
 }  
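A minimal Delayed implementation and usage sketch (class names and delays are mine, not from the JDK): getDelay reports the remaining delay and compareTo orders elements consistently with it, so take() always releases the element whose delay expires first.
 import java.util.concurrent.DelayQueue;
 import java.util.concurrent.Delayed;
 import java.util.concurrent.TimeUnit;

 public class DelayQueueDemo {
   // Becomes available delayMillis after construction.
   static class DelayedTask implements Delayed {
     final String name;
     final long expireAt;
     DelayedTask(String name, long delayMillis) {
       this.name = name;
       this.expireAt = System.currentTimeMillis() + delayMillis;
     }
     public long getDelay(TimeUnit unit) {
       return unit.convert(expireAt - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
     }
     // Ordering must be consistent with getDelay: earlier expiry first.
     public int compareTo(Delayed other) {
       return Long.compare(getDelay(TimeUnit.MILLISECONDS), other.getDelay(TimeUnit.MILLISECONDS));
     }
   }

   public static void main(String[] args) throws InterruptedException {
     DelayQueue<DelayedTask> queue = new DelayQueue<DelayedTask>();
     queue.offer(new DelayedTask("later", 200));
     queue.offer(new DelayedTask("sooner", 50));
     System.out.println(queue.take().name);   // blocks ~50ms, prints "sooner"
     System.out.println(queue.take().name);   // blocks until ~200ms, prints "later"
   }
 }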
ExecutorCompletionService
 package java.util.concurrent;  
 public class ExecutorCompletionService<V> implements CompletionService<V> {  
   private final Executor executor;  
   private final AbstractExecutorService aes;  
 private final BlockingQueue<Future<V>> completionQueue;  
   public ExecutorCompletionService(Executor executor,  
                    BlockingQueue<Future<V>> completionQueue) {  
     if (executor == null || completionQueue == null)  
       throw new NullPointerException();  
     this.executor = executor;  
     this.aes = (executor instanceof AbstractExecutorService) ?  
       (AbstractExecutorService) executor : null;  
     this.completionQueue = completionQueue;  
   }  
   public Future<V> submit(Callable<V> task) {  
     if (task == null) throw new NullPointerException();  
     RunnableFuture<V> f = newTaskFor(task);  
     executor.execute(new QueueingFuture(f));  
     return f;  
   }  
   public Future<V> submit(Runnable task, V result) {  
     if (task == null) throw new NullPointerException();  
     RunnableFuture<V> f = newTaskFor(task, result);  
     executor.execute(new QueueingFuture(f));  
     return f;  
   }  
   public Future<V> take() throws InterruptedException {  
     return completionQueue.take();  
   }  
   public Future<V> poll() {  
     return completionQueue.poll();  
   }  
   public Future<V> poll(long timeout, TimeUnit unit) throws InterruptedException {  
     return completionQueue.poll(timeout, unit);  
   }  
   private class QueueingFuture extends FutureTask<Void> {  
     QueueingFuture(RunnableFuture<V> task) {  
       super(task, null);  
       this.task = task;  
     }  
     protected void done() { completionQueue.add(task); }  
     private final Future<V> task;  
   }  
   private RunnableFuture<V> newTaskFor(Callable<V> task) {  
     if (aes == null)  
       return new FutureTask<V>(task);  
     else  
       return aes.newTaskFor(task);  
   }  
   private RunnableFuture<V> newTaskFor(Runnable task, V result) {  
     if (aes == null)  
       return new FutureTask<V>(task, result);  
     else  
       return aes.newTaskFor(task, result);  
 }  
 }  
ExecutorService
 public interface ExecutorService extends Executor {  
   <T> Future<T> submit(Callable<T> task);  
   <T> Future<T> submit(Runnable task, T result);  
   Future<?> submit(Runnable task);  
   <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)  
     throws InterruptedException;  
   <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)  
     throws InterruptedException;  
    * Executes the given tasks, returning the result  
    * of one that has completed successfully (i.e., without throwing  
    * an exception), if any do. Upon normal or exceptional return,  
    * tasks that have not completed are cancelled.  
    * The results of this method are undefined if the given  
    * collection is modified while this operation is in progress.  
   <T> T invokeAny(Collection<? extends Callable<T>> tasks)  
     throws InterruptedException, ExecutionException;  
   <T> T invokeAny(Collection<? extends Callable<T>> tasks,  
           long timeout, TimeUnit unit)  
     throws InterruptedException, ExecutionException, TimeoutException;  
   boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;  
   void shutdown();  
   List<Runnable> shutdownNow();  
   boolean isShutdown();  
   boolean isTerminated();  
 }  
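A short sketch of the submit/invokeAll/invokeAny contracts listed above (pool size, tasks, and class name are mine): invokeAll returns Futures for every task, while invokeAny returns one successful result and cancels the rest.
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;

 public class ExecutorServiceDemo {
   public static void main(String[] args) throws InterruptedException, ExecutionException {
     ExecutorService pool = Executors.newFixedThreadPool(2);
     List<Callable<Integer>> tasks = Arrays.asList(task(1), task(2), task(3));

     // invokeAll waits for all tasks and returns their Futures in the same order.
     for (Future<Integer> f : pool.invokeAll(tasks))
       System.out.println(f.get());

     // invokeAny returns the result of one successfully completed task and cancels the rest.
     System.out.println("any: " + pool.invokeAny(tasks));

     pool.shutdown();
   }

   static Callable<Integer> task(final int value) {
     return new Callable<Integer>() {
       public Integer call() { return value; }
     };
   }
 }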
Future and FutureTask
 public interface Future<V> {  
   boolean cancel(boolean mayInterruptIfRunning);  
   boolean isCancelled();  
   boolean isDone();  
   V get() throws InterruptedException, ExecutionException;  
   V get(long timeout, TimeUnit unit)  
     throws InterruptedException, ExecutionException, TimeoutException;  
 }  
 public interface RunnableFuture<V> extends Runnable, Future<V> {  
   void run();  
 }  
 public class FutureTask<V> implements RunnableFuture<V> {}  
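A minimal FutureTask sketch (class name and the computed value are mine): because FutureTask is both a Runnable and a Future, it can be handed to a plain Thread or an Executor and then queried for its result.
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.FutureTask;

 public class FutureTaskDemo {
   public static void main(String[] args) throws InterruptedException, ExecutionException {
     FutureTask<Integer> task = new FutureTask<Integer>(new Callable<Integer>() {
       public Integer call() {
         return 6 * 7;
       }
     });
     new Thread(task).start();               // run() executes the callable
     System.out.println(task.get());         // blocks until call() completes, prints 42
     System.out.println(task.isDone());      // true
   }
 }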

Learning Code – Concurrent solution

Building an Efficient, Scalable Result Cache
Caching a Future instead of a value creates the possibility of cache pollution: if a computation is cancelled or fails, future attempts to compute the result will also indicate cancellation or failure. To avoid this, Memoizer removes the Future from the cache if it detects that the computation was cancelled; it might also be desirable to remove the Future upon detecting a RuntimeException if the computation might succeed on a future attempt.
 public class Memoizer<A, V> implements Computable<A, V> {  
      private final ConcurrentMap<A, Future<V>> cache = new ConcurrentHashMap<A, Future<V>>();  
      private final Computable<A, V> c;  
      public Memoizer(Computable<A, V> c) {  
           this.c = c;  
      }  
      public V compute(final A arg) throws InterruptedException {  
           while (true) {  
                Future<V> f = cache.get(arg);  
                if (f == null) {  
                     Callable<V> eval = new Callable<V>() {  
                          public V call() throws InterruptedException {  
                               return c.compute(arg);  
                          }  
                     };  
                     FutureTask<V> ft = new FutureTask<V>(eval);  
                     f = cache.putIfAbsent(arg, ft);  
                     if (f == null) {  
                          f = ft;  
                          ft.run();  
                     }  
                }  
                try {  
                     return f.get();  
                } catch (CancellationException e) {  
                     cache.remove(arg, f);  
                } catch (ExecutionException e) {  
                     throw LaunderThrowable.launderThrowable(e.getCause());  
                }  
           }  
      }  
 }  
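A minimal usage sketch for the Memoizer above (the Computable interface here matches the shape the class compiles against in the book; the naive factorial, class name, and values are mine): the second call for the same argument is served from the cached Future.
 import java.math.BigInteger;

 interface Computable<A, V> {
   V compute(A arg) throws InterruptedException;
 }

 public class MemoizerDemo {
   public static void main(String[] args) throws InterruptedException {
     Computable<Integer, BigInteger> slow = new Computable<Integer, BigInteger>() {
       public BigInteger compute(Integer n) {
         BigInteger result = BigInteger.ONE;          // deliberately naive factorial
         for (int i = 2; i <= n; i++)
           result = result.multiply(BigInteger.valueOf(i));
         return result;
       }
     };
     Computable<Integer, BigInteger> cached = new Memoizer<Integer, BigInteger>(slow);

     System.out.println(cached.compute(20));          // computes and caches the Future
     System.out.println(cached.compute(20));          // served from the cache
   }
 }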
Use a Semaphore to turn a collection into a bounded collection.
 public class BoundedHashSet<T> {  
      private final Set<T> set;  
      private final Semaphore semaphore;  
      public BoundedHashSet(int bound) {  
           this.set = Collections.synchronizedSet(new HashSet<T>());  
           semaphore = new Semaphore(bound);  
      }  
      public boolean add(T o) throws InterruptedException {  
           semaphore.acquire();  
           boolean wasAdded = false;  
           try {  
                wasAdded = set.add(o);  
                return wasAdded;  
           } finally {  
                if (!wasAdded)  
                     semaphore.release();  
           }  
      }  
      public boolean remove(Object o) {  
           boolean wasRemoved = set.remove(o);  
           if (wasRemoved)  
                semaphore.release();  
           return wasRemoved;  
      }  
 }  
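A small usage sketch (class name and bound are mine): the semaphore permits track the remaining capacity, so add blocks once the bound is reached until remove releases a permit.
 public class BoundedHashSetDemo {
   public static void main(String[] args) throws InterruptedException {
     BoundedHashSet<String> set = new BoundedHashSet<String>(2);  // at most 2 elements

     System.out.println(set.add("a"));   // true, 1 permit left
     System.out.println(set.add("b"));   // true, 0 permits left
     // set.add("c") would now block until a permit is released
     set.remove("a");                    // releases one permit
     System.out.println(set.add("c"));   // true again
   }
 }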

Use CompletionService to render a page concurrently.

 public abstract class Renderer {  
      private final ExecutorService executor;  
      Renderer(ExecutorService executor) {  
           this.executor = executor;  
      }  
      void renderPage(CharSequence source) {  
           final List<ImageInfo> info = scanForImageInfo(source);  
           CompletionService<ImageData> completionService = new ExecutorCompletionService<ImageData>(  
                     executor);  
           for (final ImageInfo imageInfo : info)  
                completionService.submit(new Callable<ImageData>() {  
                     public ImageData call() {  
                          return imageInfo.downloadImage();  
                     }  
                });  
           renderText(source);  
           try {  
                for (int t = 0, n = info.size(); t < n; t++) {  
                     Future<ImageData> f = completionService.take();  
                     ImageData imageData = f.get();  
                     renderImage(imageData);  
                }  
           } catch (InterruptedException e) {  
                Thread.currentThread().interrupt();  
           } catch (ExecutionException e) {  
                throw LaunderThrowable.launderThrowable(e.getCause());  
           }  
      }  
      interface ImageData {  
      }  
      interface ImageInfo {  
           ImageData downloadImage();  
      }  
      abstract void renderText(CharSequence s);  
      abstract List<ImageInfo> scanForImageInfo(CharSequence s);  
      abstract void renderImage(ImageData i);  
 }  

Databases Notes


Dirty Read - from Designing Data-Intensive Applications
- a transaction sees changes made by another transaction that are not yet committed and may be rolled back later
Nonrepeatable Read
- a transaction runs the same query multiple times and the results are not the same each time.
Phantom Read
- one transaction executes the same query multiple times, and additional rows may appear or existing rows may disappear

Read uncommitted
Read committed - most basic
When reading from the database, you will only see data that has been committed (no dirty reads).
When writing to the database, you will only overwrite data that has been committed (no dirty writes).

to prevent dirty writes - use row-level locks
to prevent dirty reads
- the database remembers the old committed value and returns it to readers while the writing transaction has not yet committed
- it also keeps the overwritten-but-not-yet-committed version for the writing transaction itself.

Repeatable Read
- only sees data committed before the transaction began
Snapshot isolation - multiversion concurrency control (MVCC),
- create a new version every time a value is changed instead of update in place
- good for long-running, read-only queries such as backups and analytics
- db keeps several different committed versions of an object
- data tagged with transaction ID of the writer
- a created by field, containing the ID of the transaction that inserted this row into the table.
- a deleted by field, the ID of the transaction that requested the deletion.
- a garbage collection process removes any rows marked for deletion once it is sure that no transaction still needs to access the deleted data
- update is translated into a delete and a create
- transaction IDs are used to decide which objects a transaction can see, and which are invisible

Serializable - highest isolation level
- emulates serial transaction execution, as if transactions had been executed one after another, serially, rather than concurrently

Non-correlated SubQuery
- In a non-correlated subquery, the inner query doesn't depend on the outer query and can run as a stand-alone query
- the inner query executes before the outer query
- mostly used with IN or NOT IN
Correlated SubQuery
- Correlated subqueries are the ones in which the inner query (subquery) references the outer query.
- The outer query needs to be executed before the inner query
- slow; avoid it in favor of SQL joins.
- mostly used with EXISTS and NOT EXISTS

prepared statements
- better performance: the database parses, compiles, and optimizes the query once, then stores and reuses the optimized query plan
- prevent SQL injection attacks
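A minimal JDBC sketch of both points (the department table reuses the schema from the index examples below; the method and parameter names are mine): the parameter is bound separately from the SQL text, so user input cannot alter the query structure, and the compiled plan can be reused across executions.
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;

 public class PreparedStatementDemo {
   static String findDepartmentName(Connection conn, int deptId) throws SQLException {
     String sql = "SELECT name FROM department WHERE dept_id = ?";
     PreparedStatement ps = conn.prepareStatement(sql);   // parsed/compiled once, plan cached
     try {
       ps.setInt(1, deptId);                              // bound as data, never spliced into SQL
       ResultSet rs = ps.executeQuery();
       try {
         return rs.next() ? rs.getString("name") : null;
       } finally {
         rs.close();
       }
     } finally {
       ps.close();
     }
   }
 }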

1NF - all underlying domains contain atomic values only
2NF - every non key attribute is fully dependent on the primary key
3NF - every non key attribute is non-transitively dependent on the primary key

truncate vs delete
- truncate doesn't support a where clause; delete does.
- rollback is possible with delete, not with truncate.
- truncate is fast; delete is slow.
- truncate does minimal logging; delete logs on a per-row basis.
- delete acquires row-level locks; truncate takes a table-level lock.
- truncate doesn't fire triggers; delete does.
- prefer truncate over delete when purging whole tables.
- truncate resets the identity column in the table, if any; delete doesn't.

Why Normalize?
1. Reduce Redundancy
I. Redundancy would waste more space.
One obvious drawback of data repetition is that it consumes more space and resources than is necessary.
II. Redundancy may cause inconsistency.
Moreover, redundancy introduces the possibility of error and exposes us to inconsistencies whenever the data is not maintained in the same way, for example when the redundant copies are not all updated at the same time.
2. Prevent insert, delete, update anomaly.
Insert Anomaly
The insert anomaly refers to a situation wherein one cannot insert a new row into a relation because of an artificial dependency on another relation.
Delete Anomaly
It refers to a situation wherein a deletion of data about one particular entity causes unintended loss of data that characterizes another entity.
Update Anomaly
The update anomaly refers to a situation where an update of a single data value requires multiple rows of data to be updated.
Example: Product, Customer, Invoice.
First Normal Form: Eliminating Repeating Data
A relation is said to be in first normal form when it contains no multivalued attributes.
To transform unnormalized relations into first normal form, we must move multivalued attributes and repeating groups to new relations
Second Normal Form: Eliminating Partial Dependencies
A relation is said to be in second normal form if it meets both of the following criteria:
The relation is in first normal form.
All non-key attributes are functionally dependent on the entire primary key.
Second normal form only applies to relations where we have concatenated primary keys.
Once we find a second normal form violation, the solution is to move the attribute(s) that is (are) partially dependent to a new relation where it depends on the entire key instead of part of the key.
Attribute B is functionally dependent on attribute A if at any moment in time, there is no more than one value of attribute B associated with a given value of attribute A.
In another word, it means A determines attribute B, or that A is a determinant (unique identifier) of attribute B.
In the INVOICE relation, we can easily see that Customer Number is functionally dependent on Invoice Number because at any point in time, there can be only one value of Customer Number associated with a given value of Invoice Number.
Anti-example
INVOICE LINE ITEM: # Invoice Number, # Product Number, Product Description(only dependent on product number), Quantity, Unit Price, Extended Amount
Third Normal Form: Eliminating Transitive Dependencies
A relation is said to be in third normal form if it meets both of the following criteria:
The relation is in second normal form.
There is no transitive dependence (that is, all the non-key attributes depend only on the primary key).
To transform a second normal form relation into third normal form, simply move any transitively dependent attributes to relations where they depend only on the primary key. Be careful to leave the attribute on which they depend in the original relation as a foreign key. You will need it to reconstruct the original user view via a join.
An attribute that depends on another attribute that is not the primary key of the relation is said to be transitively dependent.
Anti-example
INVOICE: # Invoice Number, Customer Number, Customer Name(dependent on primary key attribute: Customer Number), Customer Address..., Terms, Ship Via, Order Date, Total Order Amount
The third normal form violation—a non-key attribute determining another non-key attribute
Boyce-Codd Normal Form - BCNF Form
Boyce-Codd normal form (BCNF) is a stronger version of third normal form. It addresses anomalies that occur when a non-key attribute is a determinant of an attribute that is part of the primary key (that is, when an attribute that is part of the primary key is functionally dependent on a non-key attribute).
The Boyce-Codd normal form has two requirements:
The relation must be in third normal form.
No determinants exist that are not either the primary key or a candidate key for the table. That is, a non-key attribute may not uniquely identify (determine) any other attribute, including one that participates in the primary key.
The solution is to split the unwanted determinant to a different table.
Anti-example
#Customer Number, #Product Line, Support Specialist Number
Fourth Normal Form
An additional anomaly surfaces when two or more multivalued attributes are included in the same relation.
Denormalization
Normalization leads to more relations, which translates to more tables and more joins. When database users suffer performance problems that cannot be resolved by other means, denormalization may be required. Most database experts consider denormalization a last resort, if not an act of desperation.

Possible denormalization steps include the following:
Recombining relations that were split to satisfy normalization rules
Storing redundant data in tables
Storing summarized data in tables
What Is a Transaction?
A transaction is a unit of work that is composed of a discrete series of actions that must be either completely processed or not processed at all.
Transactions have properties ACID (Atomicity, Consistency, Isolation, Durability).
Atomicity
A transaction must remain whole. That is, it must completely succeed or completely fail. When it succeeds, all changes that were made by the transaction must be preserved by the system. Should a transaction fail, all changes that were made by it must be completely undone.
Consistency
A transaction should transform the database from one consistent state to another consistent state.
Isolation
Each transaction should carry out its work independent of any other transaction that might occur at the same time.
Durability
Changes made by completed transactions should remain permanent
Read Committed Isolation Level
Read Committed is the default isolation level in PostgreSQL. When a transaction runs on this isolation level, a SELECT query sees only data committed before the query began; it never sees either uncommitted data or changes committed during query execution by concurrent transactions.
In effect, a SELECT query sees a snapshot of the database as of the instant that that query begins to run. Notice that two successive SELECT commands can see different data, even though they are within a single transaction, if other transactions commit changes during execution of the first SELECT.
The level Serializable provides the strictest transaction isolation. This level emulates serial transaction execution, as if transactions had been executed one after another, serially, rather than concurrently.
In REPEATABLE READ, one transaction will not see changes committed by another transaction after it has started.
set session transaction isolation level serializable;
Client 1:
mysql> start transaction;
mysql> select * from tt;
CLIENT 2:
mysql> update tt set d=2 where d=4;
ERROR 1205 (HY000): Lock wait timeout exceeded; try restarting transaction
This time, client 2 cannot update the row. After a few seconds of waiting, the attempt to update a row ends with the error message “Lock wait timeout exceeded”.
Both REPEATABLE READ and SERIALIZABLE prevent dirty reads, non-repeatable reads, and phantom reads.
REPEATABLE READ still allows another client to modify data while the current transaction is running; the changes made by the concurrent transaction are simply invisible to the current transaction.
SERIALIZABLE strictly prevents other transactions from changing data that the current transaction operates on.
Serializable vs. Snapshot
These are SERIALIZABLE and SNAPSHOT (as implemented in SQL Server, for example). Both are designed to avoid dirty reads, non-repeatable reads, and phantom reads, but they do so using different methods.
SERIALIZABLE
The SERIALIZABLE level prevents phantom reads by taking range locks.
SNAPSHOT
While SERIALIZABLE uses locks, SNAPSHOT instead reads from a copy of the committed data. Since no locks are taken, subsequent changes made by concurrent transactions are allowed rather than blocked.
If you try to change data that has already been changed by a concurrent transaction, you will get an update conflict error.
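A brief sketch in SQL Server syntax (the database and table names are hypothetical); SNAPSHOT must first be enabled at the database level:
-- Enable snapshot isolation for the database (hypothetical database name)
ALTER DATABASE mydb SET ALLOW_SNAPSHOT_ISOLATION ON;

SET TRANSACTION ISOLATION LEVEL SNAPSHOT;
BEGIN TRANSACTION;
SELECT * FROM account WHERE account_id = 1;          -- reads committed row versions, takes no shared locks
UPDATE account SET avail_balance = 0 WHERE account_id = 1;
COMMIT;  -- fails with an update conflict error if another transaction changed the row first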
Non-unique indexes
A non-unique index is one in which any key value may occur multiple times. This type of index is defined with the keyword INDEX or KEY.
ALTER TABLE department ADD INDEX dept_name_idx (name);
Unique indexes
Unique indexes help maintain data integrity by ensuring that no two rows of data in a table have identical key values; the exception is that NULL values may occur multiple times.
ALTER TABLE department ADD UNIQUE dept_name_idx (name);
A PRIMARY KEY is also a unique-valued index. It is similar to a UNIQUE index, but has additional restrictions:
A table may have multiple UNIQUE indexes, but at most one PRIMARY KEY.
A UNIQUE index can contain NULL values, whereas a PRIMARY KEY cannot.
A FULLTEXT index is specially designed for text searching.
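For example, in MySQL (hypothetical articles table and column):
ALTER TABLE articles ADD FULLTEXT INDEX article_body_ftx (body);
SELECT * FROM articles WHERE MATCH(body) AGAINST('database indexing');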
SHOW INDEX FROM department \G
Index implementation
B-tree indexes
Branch nodes are used for navigating the tree, while leaf nodes hold the actual values and location information.
Bitmap indexes
Although B-tree indexes are great at handling columns that contain many different values, they can become unwieldy when built on a column that allows only a small number of values.
For columns that contain only a small number of values across a large number of rows (known as low-cardinality data), Oracle Database includes bitmap indexes, which generate a bitmap for each value stored in the column.
CREATE BITMAP INDEX acc_prod_idx ON account (product_cd);
Clustered Indexes
A clustered index determines the physical storage order of the data in a table.
A table can only have a single clustered index.
The leaf nodes of a clustered index contain the data pages of the underlying table.
By default, primary key creates a clustered index.
A query on a clustered index is fast, as it requires only one lookup.
Inserts are slower, as each new row must be placed in exactly the right position within the clustered index.
Non-clustered Index
Non-clustered indexes have the same B-tree structure as clustered indexes, except:
The data rows of the underlying table are not sorted and stored in order based on their non-clustered keys.
A table can contain multiple (up to 255) non-clustered indexes.
The leaf nodes of a non-clustered index do not consist of the data pages; they contain only index pages:
If clustered index present, non-clustered index points back to the data pages in the clustered index.
If no clustered index, non-clustered indexes point to the actual data in the table.
The logical order of the index does not match the physical storage order of the rows on disk.
A unique key by default creates a non-clustered index.
A non-clustered index has its own storage space, separate from the table's storage.
This means non-clustered indexes require additional disk space on top of the table itself, whereas a clustered index is the table.
A query on a non-clustered index requires two lookups: first a lookup in the non-clustered index itself, then a lookup of the row it points to (via the clustered index key, or the row location if the table is a heap).
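A short sketch in SQL Server syntax (hypothetical account table), showing one clustered and one non-clustered index:
-- The clustered index defines the physical order of the rows (only one per table)
CREATE CLUSTERED INDEX ci_account ON account (account_id);
-- A non-clustered index is stored separately and points back to the clustered key
CREATE NONCLUSTERED INDEX ix_account_cust ON account (cust_id);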
Advantages and disadvantages of indexes
The optimizer chooses an index scan if the index columns are referenced in the SELECT statement and if the optimizer estimates that an index scan will be faster than a table scan.
Advantages of Index
Query optimization: Indexes make search queries much faster because index files are generally smaller and take less time to read than an entire table, particularly as tables grow larger. In addition, the entire index may not need to be scanned: the predicates applied to the index reduce the number of rows that must be read from the data pages.
Uniqueness: Indexes like primary key index and unique index help to avoid duplicate row data.
Disadvantages of indexes
Every time a row is added, removed or updated, all indexes on that table must be modified. Therefore, the more indexes you have the more work the server needs to do to keep all schema objects up-to-date, which tends to slow things down.
Each index requires storage or disk space. The exact amount depends on the size of the table and the size and number of columns in the index.
Each index potentially adds an alternative access path for a query for the optimizer to consider, which increases the compilation time.
The best strategy is to add an index when a clear need arises. If you need an index for only special purposes, such as a monthly maintenance routine, you can always add the index, run the routine, and then drop the index until you need it again.
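For example (MySQL syntax; the index and column names are hypothetical):
-- Add the index just before the monthly routine runs
ALTER TABLE account ADD INDEX acc_bal_idx (avail_balance);
-- ... run the maintenance routine ...
-- Drop it again so it does not slow down day-to-day inserts and updates
ALTER TABLE account DROP INDEX acc_bal_idx;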
A constraint is a rule that cannot be violated by end users.
There are five types of constraints:
Not null constraints: do not allow NULL values.
Unique constraints: do not allow duplicate values, but do allow NULL values.
Primary key constraints: do not allow duplicate or NULL values; a table can have only one primary key.
Foreign key constraints: refer to a primary-key column in another table, and do allow duplicate values.
Check constraints: restrict the allowable values for a column (a combined example follows the Practice snippets below).
Practice
CONSTRAINT fk_product_type_cd FOREIGN KEY (product_type_cd)
  REFERENCES product_type (product_type_cd),
CONSTRAINT pk_product PRIMARY KEY (product_cd)

ALTER TABLE product
ADD CONSTRAINT pk_product PRIMARY KEY (product_cd);
ALTER TABLE product
ADD CONSTRAINT fk_product_type_cd FOREIGN KEY (product_type_cd)
  REFERENCES product_type (product_type_cd);
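A hedged, combined sketch showing all five constraint types in one table definition (MySQL syntax; note that CHECK is only enforced from MySQL 8.0.16 onward):
CREATE TABLE product (
  product_cd VARCHAR(10) NOT NULL,                 -- not null constraint
  name VARCHAR(50) NOT NULL,
  product_type_cd VARCHAR(10) NOT NULL,
  price DECIMAL(10,2),
  CONSTRAINT pk_product PRIMARY KEY (product_cd),  -- primary key constraint
  CONSTRAINT uq_product_name UNIQUE (name),        -- unique constraint
  CONSTRAINT fk_product_type_cd FOREIGN KEY (product_type_cd)
    REFERENCES product_type (product_type_cd),     -- foreign key constraint
  CONSTRAINT ck_price CHECK (price >= 0)           -- check constraint
);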
Constraints and Indexes
Constraint creation sometimes involves the automatic generation of an index. However, database servers behave differently regarding the relationship between constraints and indexes.
Cascading Constraints
ALTER TABLE product
ADD CONSTRAINT fk_product_type_cd FOREIGN KEY (product_type_cd)
  REFERENCES product_type (product_type_cd)
  ON UPDATE CASCADE
  ON DELETE CASCADE;
View
A view is a stored query accessible as a virtual table composed of the query's result set. Unlike ordinary (base) tables, a view does not form part of the physical schema: it is a dynamic, virtual table computed or collated from data in the database.
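For example (hypothetical customer and account tables), a view that hides the join and aggregation from its users:
CREATE VIEW customer_balance_vw AS
SELECT c.cust_id, c.name, SUM(a.avail_balance) AS total_balance
FROM customer c
  INNER JOIN account a ON a.cust_id = c.cust_id
GROUP BY c.cust_id, c.name;

-- Users query the view like an ordinary table
SELECT * FROM customer_balance_vw WHERE total_balance > 1000;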
Advantages of View
Views can provide advantages over tables:  
* Views can be used to make complex queries easy. A user can run a simple query on a view to display data from multiple tables without needing to know how to join tables.
* Views can simplify statements for users: we can define frequently used joins, projections, and selections as views so that users do not have to specify all the conditions and qualifications each time an operation is performed on that data.
* Views let users focus on the data that interests them and on the tasks for which they are responsible. Data that is not of interest to a user can be left out of the view.
* Different views can be created from the same data according to requirements; in this way we can display different data to different users.
* Views provide an additional level of security: through a view, users can query and modify only the data they can see. The rest of the database is neither visible nor accessible, so we can hide sensitive data from certain groups of users.
* Views take very little space to store; the database contains only the definition of a view, not a copy of all the data it presents.
Disadvantages of Views
* Views affect performance: querying through a view can take more time than querying the underlying table directly.
* A view depends on its underlying table objects; when a table is dropped, the view becomes invalid.
* Rows available through a view are not guaranteed to be sorted or ordered; an ORDER BY in a view definition is generally not honored when the view is queried.
Read-only vs. updatable views
We can define views as read-only or updatable.
Oracle and DB2 support materialized views - pre-executed, non-virtual views commonly used in data warehousing.
In a materialized view, the query result is cached as a concrete table that may be refreshed from the original base tables from time to time. This enables much more efficient access, at the cost of some data being potentially out-of-date. The accuracy of a materialized view depends on the frequency or trigger mechanisms behind its updates.
It is most useful in data warehousing scenarios, where frequent queries of the actual base tables can be extremely expensive.
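A brief sketch in Oracle syntax (the table and refresh policy are hypothetical):
CREATE MATERIALIZED VIEW sales_summary_mv
  BUILD IMMEDIATE
  REFRESH COMPLETE ON DEMAND
AS
SELECT product_cd, SUM(amount) AS total_amount
FROM sales
GROUP BY product_cd;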
Stored Procedure
A stored procedure is a precompiled subroutine stored in the database. Stored procedures are used to consolidate and centralize logic that was originally implemented in applications. Large or complex processing that might require the execution of several SQL statements can be moved into stored procedures, and all applications simply call the procedures.
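A minimal MySQL sketch (the procedure name and account table are hypothetical); the DELIMITER change is only needed in the mysql command-line client:
DELIMITER //
CREATE PROCEDURE transfer_funds(IN from_id INT, IN to_id INT, IN amount DECIMAL(10,2))
BEGIN
  START TRANSACTION;
  UPDATE account SET avail_balance = avail_balance - amount WHERE account_id = from_id;
  UPDATE account SET avail_balance = avail_balance + amount WHERE account_id = to_id;
  COMMIT;
END //
DELIMITER ;

-- The application issues a single call; all SQL runs on the server
CALL transfer_funds(1, 2, 100.00);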
Benefits of using stored procedures
Reduced network usage between clients and servers
The stored procedure performs processing on the database server, without transmitting unnecessary data across the network. Only the records that are actually required by the client application are transmitted. Using a stored procedure can result in reduced network usage and better overall performance.
The more SQL statements that you group together in a stored procedure, the more you reduce network usage and the time that database locks are held.
Stored procedures are tunable. By having procedures that handle the database work for your interface, you eliminate the need to modify application source code to improve a query's performance. Changes can be made to the stored procedures that are transparent to the front-end interface.
Stored procedures are usually written by database developers/administrators. Persons holding these roles are usually more experienced in writing efficient queries and SQL statements. If you have your people performing the tasks to which they are best suited, then you will ultimately produce a better overall application.
Improved security: by coupling database privileges with stored procedures that use static SQL, the database administrator (DBA) can improve security, because controlled access to sensitive data can be granted through appropriate selection of the privileges a program has when it executes.
Centralized security, administration, and maintenance for common routines: by managing shared logic in one place on the server, you can simplify security, administration, and maintenance.
Disadvantages:
Encapsulating business logic in stored procedures tightly couples that logic to the specific database, so it becomes hard to switch from one database vendor to another.

Trigger
A database trigger is procedural code that is automatically executed in response to certain events on a particular table or view in a database. Triggers are commonly used to prevent changes, log changes, audit changes, enhance changes, enforce or execute business rules.
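A minimal MySQL sketch (the audit table and its columns are hypothetical) that logs every change to account balances:
CREATE TRIGGER account_balance_audit
AFTER UPDATE ON account
FOR EACH ROW
INSERT INTO account_audit (account_id, old_balance, new_balance, changed_at)
VALUES (NEW.account_id, OLD.avail_balance, NEW.avail_balance, NOW());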
Advantages of Triggers
The main advantage of triggers is that they are automatic: whenever the table is affected by an insert, update, or delete, the trigger fires implicitly.
Disadvantages of Triggers
Triggers run on every qualifying change made to the table; therefore they add load to the database and can cause the system to run slower.
Triggers are hard to track and debug.
Viewing a trigger is difficult compared to tables, views, and stored procedures.
Trigger execution is invisible; it is easy to forget that triggers exist.
They serve two different purposes. A procedure executes only when called explicitly. A trigger is 'triggered' by a system event and lets us intercede on insert, update, and delete.
We can write a trigger that calls a procedure. We can't really write a procedure that 'calls' a trigger.
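For example (a hedged sketch; refresh_product_summary is a hypothetical procedure), a MySQL trigger body may CALL a procedure, subject to the usual restrictions on what trigger code is allowed to do:
CREATE TRIGGER product_after_insert
AFTER INSERT ON product
FOR EACH ROW
CALL refresh_product_summary(NEW.product_type_cd);  -- hypothetical procedure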

MySQL
Innodb
- Row level locking
- default port 3306
- /etc/init.d/mysql start

How does InnoDB behave without a Primary Key?
If a table is declared with no PRIMARY KEY and no non-nullable UNIQUE KEY, InnoDB will automatically add a 6-byte (48-bit) integer column called ROW_ID to the table, and cluster the data based on that column.

All tables using such ROW_ID columns share the same global sequence counter, which is part of the data dictionary. The maximum used value for all row IDs (well, technically the next ID to be used) is stored in the system tablespace (e.g. ibdata1) in page 7 (type SYS), within the data dictionary header (field DICT_HDR_ROW_ID).

This global sequence counter is protected by dict_sys->mutex, even for incrementing (as opposed to using atomic increment).

AUTO_INCREMENT Handling in InnoDB
innodb_autoinc_lock_mode = 0 (“traditional” lock mode)
- Table Lock until the statement finishes
innodb_autoinc_lock_mode = 1 (“consecutive” lock mode) - default
- “Simple inserts” (for which the number of rows to be inserted is known in advance) avoid table-level AUTO-INC locks by obtaining the required number of auto-increment values under the control of a mutex (a light-weight lock) that is only held for the duration of the allocation process, not until the statement completes.
innodb_autoinc_lock_mode = 2 (“interleaved” lock mode)
- No table-level AUTO-INC lock is taken; concurrent statements may interleave auto-increment values, which is safe only with row-based binary logging.
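The lock mode is a startup option (it is not a dynamic variable), so it is typically set in my.cnf; for example:
# my.cnf (server restart required)
[mysqld]
innodb_autoinc_lock_mode = 2

-- verify from a client session
SHOW VARIABLES LIKE 'innodb_autoinc_lock_mode';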

SQL
IS NULL: use IS NULL (not = NULL) to test for NULL values.
-- Self-join: list each employee together with his or her manager
SELECT e1.Name, e2.Name FROM Employee e1 INNER JOIN Employee e2 ON e1.ManagerID = e2.EmployeeID;
-- Delete duplicate rows by name, keeping the row with the smallest id
DELETE e1 FROM EMPLOYEE e1, EMPLOYEE e2 WHERE e1.name = e2.name AND e1.id > e2.id;

Finding the 2nd highest salary
-- Option 1: exclude the maximum, then take the max of what remains
SELECT MAX(Salary) FROM Employee WHERE Salary NOT IN (SELECT MAX(Salary) FROM Employee);
-- Option 2: take the top two distinct salaries, then the lower of the two
SELECT salary FROM (SELECT DISTINCT salary FROM Employee ORDER BY salary DESC LIMIT 2) AS emp ORDER BY salary LIMIT 1;

Find nth highest salary
SELECT DISTINCT Salary FROM Employee
ORDER BY Salary DESC LIMIT n-1, 1;  -- substitute literal values for n-1 (e.g. LIMIT 4,1 for the 5th highest); MySQL does not allow expressions in LIMIT

Inefficient (correlated subquery) alternative:
SELECT Emp1.Salary FROM Employee Emp1
WHERE (N-1) = (
SELECT COUNT(DISTINCT(Emp2.Salary))
FROM Employee Emp2
WHERE Emp2.Salary > Emp1.Salary)
