Commits

Clayton Sims committed 9a82129

A bunch of updates to address some bugs and "freezes" in the logging system. Dumping logs was taking an intractable amount of time (over 10 minutes) because each storage delete was an atomic action, and the number of logs retained for device reporting was simply too high. Addressed these by letting callers define the granularity of delete-all actions and by lowering the log rollover threshold.
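
For context on the delete-granularity fix: RMSStorageUtility now brackets a batch of removals in a single keyed "delete action", so the index metadata is committed once per batch instead of once per removed record. The standalone sketch below only illustrates that begin/complete pattern; the class, field, and method names are simplified stand-ins, not the actual JavaRosa API (see the RMSStorageUtility.java diff below for the real implementation).

import java.util.Hashtable;

// Illustrative sketch only: mimics the keyed begin/complete delete-action
// pattern added to RMSStorageUtility, with no RMS or JavaRosa dependencies.
public class BatchDeleteSketch {

	private final Hashtable records = new Hashtable();
	private Object transactionKey;   // non-null while a delete action is open
	private int indexCommits;        // stands in for rewrites of the index record

	public synchronized void beginDeleteAction(Object key) {
		// If an outer caller already owns the action, inner calls simply reuse it.
		if (this.transactionKey != null) {
			return;
		}
		this.transactionKey = key;
	}

	public synchronized void completeDeleteAction(Object key) {
		// Only the caller that opened the action gets to commit it.
		if (this.transactionKey == null || !this.transactionKey.equals(key)) {
			return;
		}
		commitIndex();
		this.transactionKey = null;
	}

	public synchronized void remove(Integer id) {
		final Object key = "remove";
		beginDeleteAction(key);      // no-op if removeAll already opened an action
		records.remove(id);
		completeDeleteAction(key);   // commits only if this call owns the action
	}

	public synchronized void removeAll(Integer[] ids) {
		final Object key = "removeAll";
		beginDeleteAction(key);      // one commit for the whole batch
		for (int i = 0; i < ids.length; i++) {
			remove(ids[i]);
		}
		completeDeleteAction(key);
	}

	private void commitIndex() {
		indexCommits++;
	}

	public static void main(String[] args) {
		BatchDeleteSketch store = new BatchDeleteSketch();
		for (int i = 0; i < 5; i++) {
			store.records.put(new Integer(i), "record " + i);
		}
		store.removeAll(new Integer[] { new Integer(0), new Integer(1), new Integer(2) });
		System.out.println("index commits: " + store.indexCommits);  // prints 1, not 3
	}
}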


Files changed (6)

core/src/org/javarosa/core/services/Logger.java

 	}
 	
 	public static void exception (String info, Exception e) {
+		e.printStackTrace();
 		log("exception", (info != null ? info + ": " : "") + WrappedException.printException(e));
 	}
 	

j2me/core/src/org/javarosa/j2me/log/J2MELogger.java

 		synchronized(logStorage) {
 			if(!checkStorage()) { return null; }
 			
-			Vector<Integer> vIDs = getLogIDsInOrder();
-			
 			Vector logs = new Vector();
-			for (int i = vIDs.size() - 1; i >= 0; i--) {
-				logs.addElement(logStorage.read(vIDs.elementAt(i).intValue()));
+			for(IStorageIterator li = logStorage.iterate(); li.hasMore() ; ) {
+				logs.addElement((LogEntry)li.nextRecord());
 			}
 		
 			LogEntry[] collection = new LogEntry[logs.size()];
 			throw new WrappedException(rse);
 		}
 	}
-
-	private Vector<Integer> getLogIDsInOrder () {
-		SortedIntSet IDs = new SortedIntSet();
-		IStorageIterator li = logStorage.iterate();
-		while (li.hasMore()) {
-			IDs.add(li.nextID());
-		}
-		return IDs.getVector();
-	}
 	
 	public void serializeLogs(StreamLogSerializer serializer) throws IOException {
 		serializeLogs(serializer, 1 << 20);
 	public void serializeLogs(StreamLogSerializer serializer, int limit) throws IOException {
 		if(storageBroken) { return; };
 		
-		Vector<Integer> vIDs;
+		int count = 0;
+		
+		IStorageIterator li;
+		
+		//This should capture its own internal state when it starts to iterate.
 		synchronized(logStorage) {
-			if(!checkStorage()) { return; }
-			vIDs = getLogIDsInOrder();
+			li = logStorage.iterate();
 		}
-			
-		int start = vIDs.size() - 1;
-		int end = -1;
-		if (limit >= 0) {
-			end = Math.max(start - limit, end);
-		} else {
-			start = Math.min(-limit - 1, start);
+		
+		while(li.hasMore() && count < limit) {
+			int id = li.peekID();
+			LogEntry log = (LogEntry)li.nextRecord();
+			serializer.serializeLog(id, log);
+			count++;
 		}
 		
 		serializer.setPurger(new StreamLogSerializer.Purger () {
 				clearLogs(IDs);
 			}
 		});
-		
-		//this is technically not safe to have outside the synchronized block, but sending the logs
-		//via streaming may potentially take a very long time, and we don't want all other logging
-		//calls in the app to block in the meantime. extra log entries being added shouldn't
-		//interfere... just don't clear the logs!
-		for (int i = start; i > end; i--) {
-			int id = vIDs.elementAt(i).intValue();
-			serializer.serializeLog(id, (LogEntry)logStorage.read(id));
-		}
-
 	}
 
 	public int logSize() {

j2me/core/src/org/javarosa/j2me/storage/rms/RMSStorageIterator.java

 
 import org.javarosa.core.services.storage.IStorageIterator;
 import org.javarosa.core.services.storage.StorageModifiedException;
+import org.javarosa.core.util.SortedIntSet;
 import org.javarosa.core.util.externalizable.Externalizable;
 
 public class RMSStorageIterator implements IStorageIterator {
 	
 	public RMSStorageIterator (RMSStorageUtility store, Hashtable index) {
 		
-		Vector IDs = new Vector();
+		SortedIntSet IDs = new SortedIntSet();
+
 		for (Enumeration e = index.keys(); e.hasMoreElements(); ) {
-			IDs.addElement(e.nextElement());
+			IDs.add(((Integer)e.nextElement()).intValue());
 		}
 		this.index = index;
 		this.store = store;
-		this.IDs = IDs;
+		this.IDs = IDs.getVector();
 		pos = 0;
 		valid = true;
 	}

j2me/core/src/org/javarosa/j2me/storage/rms/RMSStorageUtility.java

 	 */
 	public void remove (int id) {
 		synchronized (getAccessLock()) {
-			RMSStorageInfo info = getInfoRecord();
-			Hashtable idIndex = getIDIndexRecord();
+			
+			//Start a transaction, if we aren't already inside of one
+			final String transactionKey = "remove";
+			beginDeleteAction(transactionKey);
+			
+			//Get the current transaction state
+			RMSStorageInfo info = (RMSStorageInfo)deleteActionCache[0];
+			Hashtable idIndex = (Hashtable)deleteActionCache[1];
+			
 			if (!idIndex.containsKey(new Integer(id))) {
 				throw new IllegalArgumentException("Record ID [" + id + "] not found");
 			}
-	
-			setDirty();
-						
+			
+			//Perform the actual deletion
 			RMSRecordLoc loc = (RMSRecordLoc)idIndex.get(new Integer(id));
 			txRecord(id, "delete");
 			getDataStore(loc.rmsID).removeRecord(loc.recID);
 			
+			//clean up metadata
 			info.numRecords--;
 			idIndex.remove(new Integer(id));
+			
+			//commit transaction (if we opened it)
+			completeDeleteAction(transactionKey);
+		}
+	}
+	
+	
+	//these two objects contain the meta state of the store during a delete transaction
+	//If they are null, no transaction should be occurring
+	/** RMSStorageInfo, Hashtable **/
+	private Object[] deleteActionCache;
+	//The object key representing the current transaction
+	private Object transactionKey;
+	
+	//Begins an atomic delete transaction, and captures the current state of the storage
+	//which it will use until that transaction is complete
+	private Object[] beginDeleteAction(Object transactionKey) {
+		synchronized (getAccessLock()) {
+			//check to see if something else owns the transaction handle
+			if(this.transactionKey != null) {
+				if(!this.transactionKey.equals(transactionKey)) {
+					return deleteActionCache;
+				} else {
+					throw new RuntimeException("Improperly structured atomic delete action (multiple transaction openings from same path)");
+				}
+			}
+			
+			
+			RMSStorageInfo info = getInfoRecord();
+			Hashtable idIndex = getIDIndexRecord();
+				
+			deleteActionCache = new Object[] {info, idIndex};
+			this.transactionKey = transactionKey;
+
+			setDirty();
+			return deleteActionCache;
+		}
+	}
+	
+	//Commits the delete action by updating all of the metadata and index information,
+	//and releases the transaction locks
+	private void completeDeleteAction(Object transactionKey) {
+		synchronized (getAccessLock()) {
+			
+			//If there's no data cached, this transaction is completely incorrect. 
+			if(this.transactionKey == null || deleteActionCache == null) {
+				throw new RuntimeException("Improperly structured atomic delete action");
+			}
+			
+			//See if the action in question owns this transaction, if not
+			//we need to bail
+			if(!this.transactionKey.equals(transactionKey)) {
+				return;
+			}
+			
+			RMSStorageInfo info = (RMSStorageInfo)deleteActionCache[0];
+			Hashtable idIndex = (Hashtable)deleteActionCache[1];
+			
 			commitIndex(info, idIndex);
+			
+			deleteActionCache = null;
+			transactionKey = null;
+			
 			setClean();
 			storageModified();
 		}
 	}
+
 	
 	/**
 	 * Remove object from the store
 					IDs.addElement(new Integer(id));
 				}
 			}
-						
+			final String transactionKey = "removeAll";
+
+			//Begin an atomic delete action, and capture/cache state variables
+			beginDeleteAction(transactionKey);
+			
+			//Delete the records
 			for (int i = 0; i < IDs.size(); i++) {
 				int id = ((Integer)IDs.elementAt(i)).intValue();
 				remove(id);
 			}
 			
+			//Complete the action, and commit the cached variables.
+			completeDeleteAction(transactionKey);
+			
 			return IDs;
 		}
 	}

j2me/core/src/org/javarosa/j2me/storage/rms/RMSStorageUtilityIndexed.java

 			
 		super.remove(id);
 		
-		if (hasMetaData)
+		if (hasMetaData) {
 			removeMetaData(id, (IMetaData)old);
+		}
 	}
 	
 	public Vector getIDsForValue (String fieldName, Object value) {

j2me/misc/org.javarosa.log/src/org/javarosa/log/activity/DeviceReportState.java

 
 	public static boolean activated = false;
 	
-	private static final int LOG_ROLLOVER_SIZE = 2000;
+	private static final int LOG_ROLLOVER_SIZE = 750;
 	
 	private static final String XMLNS = "http://code.javarosa.org/devicereport";
 	
 			if(!ref.isReadOnly()) {
 				success = true;
 				try {
-					Logger._().serializeLogs(new LogWriter(ref.getOutputStream()));
+					LogWriter writer = new LogWriter(ref.getOutputStream());
+					Logger._().serializeLogs(writer);
 				} catch (IOException ioe) {
 					success = false;
 				}