Commits

Anders Bergh committed 0de2184

xnu 2050.18.24

Files changed (52)

bsd/hfs/hfs_vfsops.c

  *                                        FABN = old FABN - E.blockCount
  *
  * Inputs: 
- *	extent_info - This is the structure that contains state about 
- *	              the current file, extent, and extent record that 
- *	              is being relocated.  This structure is shared 
- *	              among code that traverses through all the extents 
- *	              of the file, code that relocates extents, and 
- *	              code that splits the extent. 
+ *	extent_info -   This is the structure that contains state about 
+ *	                the current file, extent, and extent record that 
+ *	                is being relocated.  This structure is shared 
+ *	                among code that traverses through all the extents 
+ *	                of the file, code that relocates extents, and 
+ *	                code that splits the extent. 
+ *	newBlockCount - The blockCount of the extent to be split after 
+ *	                a successful split operation.
  * Output:
  * 	Zero on success, non-zero on failure.
  */
 	extents = extent_info->extents;
 	cp = VTOC(extent_info->vp);
 
+	if (newBlockCount == 0) {
+		if (hfs_resize_debug) {
+			printf ("hfs_split_extent: No splitting required for newBlockCount=0\n");
+		}
+		return error;
+	}
+
 	if (hfs_resize_debug) {
 		printf ("hfs_split_extent: Split record:%u recStartBlock=%u %u:(%u,%u) for %u blocks\n", extent_info->overflow_count, extent_info->recStartBlock, index, extents[index].startBlock, extents[index].blockCount, newBlockCount);
 	}
 				goto out;
 			}
 			if (hfs_resize_debug) {
-				printf ("hfs_split_extent: Deleted record with startBlock=%u\n", (is_xattr ? xattr_key->startBlock : extents_key->startBlock));
+				printf ("hfs_split_extent: Deleted extent record with startBlock=%u\n", (is_xattr ? xattr_key->startBlock : extents_key->startBlock));
 			}
 		}
 
 			printf ("hfs_split_extent: Inserted extent record with startBlock=%u\n", write_recStartBlock);
 		}
 	}
+
+out:
+	/* 
+	 * Extents overflow btree or attributes btree headers might have 
+	 * been modified during the split/shift operation, so flush the 
+	 * changes to the disk while we are inside the journal transaction.  
+	 * We should only be able to generate I/O that modifies the B-Tree 
+	 * header nodes while we're in the middle of a journal transaction.  
+	 * Otherwise it might result in a panic during unmount.
+	 */
 	BTFlushPath(extent_info->fcb);
-out:
+
 	if (extents_rec) {
 		FREE (extents_rec, M_TEMP);
 	}
 	 */
 	if (oldStartBlock < allocLimit) {
 		newBlockCount = allocLimit - oldStartBlock;
-		
+
+		if (hfs_resize_debug) {
+			int idx = extent_info->extent_index;
+			printf ("hfs_reclaim_extent: Split straddling extent %u:(%u,%u) for %u blocks\n", idx, extent_info->extents[idx].startBlock, extent_info->extents[idx].blockCount, newBlockCount);
+		}
+
 		/* If the extent belongs to a btree, check and trim 
 		 * it to be multiple of the node size. 
 		 */
 				if (remainder_blocks) {
 					newBlockCount -= remainder_blocks;
 					if (hfs_resize_debug) {
-						printf ("hfs_reclaim_extent: Fixing extent block count, node_blks=%u, old=%u, new=%u\n", node_size/hfsmp->blockSize, newBlockCount + remainder_blocks, newBlockCount);
+						printf ("hfs_reclaim_extent: Round-down newBlockCount to be multiple of nodeSize, node_allocblks=%u, old=%u, new=%u\n", node_size/hfsmp->blockSize, newBlockCount + remainder_blocks, newBlockCount);
 					}
 				}
 			}
-		}
-
-		if (hfs_resize_debug) {
-			int idx = extent_info->extent_index;
-			printf ("hfs_reclaim_extent: Split straddling extent %u:(%u,%u) for %u blocks\n", idx, extent_info->extents[idx].startBlock, extent_info->extents[idx].blockCount, newBlockCount);
+			/* The newBlockCount is zero because of rounding down so that
+			 * btree nodes are not split across extents.  Therefore this
+			 * extent straddling the resize boundary does not require
+			 * splitting.  Skip ahead to relocating the complete extent.
+			 */
+			if (newBlockCount == 0) {
+				if (hfs_resize_debug) {
+					printf ("hfs_reclaim_extent: After round-down newBlockCount=0, skip split, relocate full extent\n");
+				}
+				goto relocate_full_extent;
+			}
 		}
 
 		/* Split the extents into two parts --- the first extent lies
 		}
 		/* Split failed, so try to relocate entire extent */
 		if (hfs_resize_debug) {
-			printf ("hfs_reclaim_extent: Split straddling extent failed, reclocate full extent\n");
-		}
-	}
-
+			int idx = extent_info->extent_index;
+			printf ("hfs_reclaim_extent: Split straddling extent %u:(%u,%u) for %u blocks failed, relocate full extent\n", idx, extent_info->extents[idx].startBlock, extent_info->extents[idx].blockCount, newBlockCount);
+		}
+	}
+
+relocate_full_extent:
 	/* At this point, the current extent requires relocation.  
 	 * We will try to allocate space equal to the size of the extent 
 	 * being relocated first to try to relocate it without splitting.  
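
In outline, the new logic rounds the split size down to a whole number of B-tree node allocation blocks and, when nothing remains, skips the split and relocates the whole extent. A minimal sketch of that flow, using the names from the hunks above:

    /* Sketch only: round the split size down to a multiple of the
     * B-tree node size, then skip the split if nothing remains. */
    u_int32_t node_allocblks = node_size / hfsmp->blockSize;
    u_int32_t remainder_blocks = newBlockCount % node_allocblks;

    if (remainder_blocks)
            newBlockCount -= remainder_blocks;
    if (newBlockCount == 0)
            goto relocate_full_extent;  /* straddling extent needs no split */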

bsd/hfs/hfs_vnops.c

 
 	tdcp->c_flag |= C_FORCEUPDATE;  // XXXdbg - force it out!
 	(void) hfs_update(tdvp, 0);
+
+	/* Update the vnode's name now that the rename has completed. */
+	vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, 
+			tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
+
+	/* 
+	 * At this point, we may have a resource fork vnode attached to the 
+	 * 'from' vnode.  If it exists, we will want to update its name, because
+	 * it contains the old name + _PATH_RSRCFORKSPEC. ("/..namedfork/rsrc").
+	 *
+	 * Note that the only thing we need to update here is the name attached to
+	 * the vnode, since a resource fork vnode does not have a separate resource
+	 * cnode -- it's still 'fcp'.
+	 */
+	if (fcp->c_rsrc_vp) {
+		char* rsrc_path = NULL;
+		int len;
+
+		/* Create a new temporary buffer that's going to hold the new name */
+		MALLOC_ZONE (rsrc_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
+		len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC);
+		len = MIN(len, MAXPATHLEN);
+
+		/* 
+		 * vnode_update_identity will do the following for us:
+		 * 1) release reference on the existing rsrc vnode's name.
+		 * 2) copy/insert new name into the name cache
+		 * 3) attach the new name to the resource vnode
+		 * 4) update the vnode's vid
+		 */
+		vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE));
+
+		/* Free the memory associated with the resource fork's name */
+		FREE_ZONE (rsrc_path, MAXPATHLEN, M_NAMEI);	
+	}
 out:
 	if (got_cookie) {
 		cat_postflight(hfsmp, &cookie, p);
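
For illustration, the resource-fork name built above is just the new file name with _PATH_RSRCFORKSPEC appended. A userspace-style sketch (the file name and the RSRCFORKSPEC stand-in are illustrative):

    #include <stdio.h>
    #include <sys/param.h>

    #define RSRCFORKSPEC "/..namedfork/rsrc"  /* _PATH_RSRCFORKSPEC in <sys/paths.h> */

    int main(void)
    {
            char rsrc_path[MAXPATHLEN];
            int  len;

            len = snprintf(rsrc_path, MAXPATHLEN, "%s%s", "newname", RSRCFORKSPEC);
            printf("%s (%d)\n", rsrc_path, len);  /* newname/..namedfork/rsrc (24) */
            return 0;
    }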

bsd/kern/kern_event.c

 filt_procattach(struct knote *kn)
 {
 	struct proc *p;
-	pid_t selfpid = (pid_t)0;
 
 	assert(PID_MAX < NOTE_PDATAMASK);
 	
 		return (ESRCH);
 	}
 
-	if ((kn->kn_sfflags & NOTE_EXIT) != 0) {
-		selfpid = proc_selfpid();
-		/* check for validity of NOTE_EXISTATUS */
-		if (((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) && 
-			((p->p_ppid != selfpid) && (((p->p_lflag & P_LTRACED) == 0) || (p->p_oppid != selfpid)))) {
+	const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
+
+	if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits)
+		do {
+			pid_t selfpid = proc_selfpid();
+
+			if (p->p_ppid == selfpid)
+				break;	/* parent => ok */
+
+			if ((p->p_lflag & P_LTRACED) != 0 &&
+			    (p->p_oppid == selfpid))
+				break;	/* parent-in-waiting => ok */
+
 			proc_rele(p);
-			return(EACCES);
-		}
-	}
+			return (EACCES);
+		} while (0);
 
 	proc_klist_lock();
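
The effect of the reworked check: only the parent (or the tracing "parent-in-waiting") may combine NOTE_EXIT with NOTE_EXITSTATUS; anyone else gets EACCES. A minimal userspace sketch of such a registration:

    #include <sys/event.h>
    #include <unistd.h>

    /* kq is an existing kqueue(2) descriptor; pid must be our child
     * (or traced child), otherwise this registration fails with EACCES. */
    int watch_child_exit(int kq, pid_t pid)
    {
            struct kevent kev;

            EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
                   NOTE_EXIT | NOTE_EXITSTATUS, 0, NULL);
            return kevent(kq, &kev, 1, NULL, 0, NULL);
    }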
 

bsd/kern/kern_exit.c

 	struct uthread * uth;
 	pid_t pid;
 	int exitval;
+	int knote_hint;
 
 	uth = (struct uthread *)get_bsdthread_info(current_thread());
 
 	p->task = TASK_NULL;
 	set_bsdtask_info(task, NULL);
 
-	/* exit status will be seen  by parent process */
-	proc_knote(p, NOTE_EXIT | (p->p_xstat & 0xffff));
+	knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
+	if (p->p_oppid != 0) {
+		knote_hint |= NOTE_EXIT_REPARENTED;
+	}
+
+	proc_knote(p, knote_hint);
 
 	/* mark the thread as the one that is doing proc_exit
 	 * no need to hold proc lock in uthread_free
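
On the consuming side, a watcher sees NOTE_EXIT (and now possibly NOTE_EXIT_REPARENTED) in the delivered event. A sketch, under the assumption that with NOTE_EXITSTATUS requested the wait(2)-style status arrives in kev.data, as the low 16 bits of the hint built above suggest:

    #include <sys/event.h>
    #include <sys/wait.h>
    #include <stdio.h>

    void report_exit(const struct kevent *kev)
    {
            if (kev->fflags & NOTE_EXIT) {
                    /* assumption: data carries the wait(2)-style status */
                    int status = (int)kev->data;

                    if (WIFEXITED(status))
                            printf("exit code %d\n", WEXITSTATUS(status));
            }
    }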

bsd/kern/kern_lockf.c

 #endif /* LOCKF_DEBUGGING */
 		error = msleep(lock, &vp->v_lock, priority, lockstr, 0);
 
-		if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
-		        if ((block = lf_getblock(lock, -1))) {
-				lf_move_blocked(block, lock);
+		if (error == 0 && (lock->lf_flags & F_ABORT) != 0)
+			error = EBADF;
+
+		if (lock->lf_next) {
+			/*
+			 * lf_wakelock() always sets wakelock->lf_next to
+			 * NULL before a wakeup; so we've been woken early
+			 * - perhaps by a debugger, signal or other event.
+			 *
+			 * Remove 'lock' from the block list (avoids double-add
+			 * in the spurious case, which would create a cycle)
+			 */
+			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
+			lock->lf_next = NULL;
+
+			if (error == 0) {
+				/*
+				 * If this was a spurious wakeup, retry
+				 */
+				printf("%s: spurious wakeup, retrying lock\n",
+				    __func__);
+				continue;
 			}
 		}
 
-		if (error == 0 && (lock->lf_flags & F_ABORT) != 0)
-			error = EBADF;
+		if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
+		        if ((block = lf_getblock(lock, -1)) != NULL)
+				lf_move_blocked(block, lock);
+		}
 
-		if (error) {	/* XXX */
-			/*
-			 * We may have been awakened by a signal and/or by a
-			 * debugger continuing us (in which cases we must remove
-			 * ourselves from the blocked list) and/or by another
-			 * process releasing a lock (in which case we have
-			 * already been removed from the blocked list and our
-			 * lf_next field set to NOLOCKF).
-			 */
-			if (lock->lf_next) {
-				TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
-				lock->lf_next = NOLOCKF;
-			}
+		if (error) {
 			if (!TAILQ_EMPTY(&lock->lf_blkhd))
 			        lf_wakelock(lock, TRUE);
-			  
 			FREE(lock, M_LOCKF);
 			return (error);
-		}	/* XXX */
+		}
 	}
+
 	/*
 	 * No blocks!!  Add the lock.  Note that we will
 	 * downgrade or upgrade any overlapping locks this
 			        struct lockf *tlock;
 
 			        TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) {
+					if (TAILQ_NEXT(tlock, lf_block) == tlock) {
+						/* See rdar://10887303 */
+						panic("cycle in wakelock list");
+					}
 				        tlock->lf_next = wakelock;
 				}
 			}
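
The early-wakeup handling above follows the same discipline as the classic condition-variable idiom: on wakeup, re-check the predicate and, if it does not hold, go back to waiting (here, retry the lock). A userspace analogy, not the kernel code itself:

    #include <pthread.h>

    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
    int ready;

    void wait_for_ready(void)
    {
            pthread_mutex_lock(&m);
            while (!ready)              /* wakeups may be spurious: re-check */
                    pthread_cond_wait(&c, &m);
            pthread_mutex_unlock(&m);
    }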

bsd/kern/uipc_mbuf.c

 
 	for (i = 0; i < numpages; i++, page += NBPG) {
 		ppnum_t offset = ((char *)page - (char *)mbutl) / NBPG;
-		ppnum_t new_page = pmap_find_phys(kernel_pmap,
-		    (vm_offset_t)page);
+		ppnum_t new_page = pmap_find_phys(kernel_pmap, page);
 
 		/*
 		 * In the case of no mapper being available the following
 		 * mapper the appropriate I/O page is returned.
 		 */
 		VERIFY(offset < mcl_pages);
-		new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page);
+		if (mcl_paddr_base) {
+		    bzero((void *)(uintptr_t) page, page_size);
+		    new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page);
+		}
 		mcl_paddr[offset] = new_page << PGSHIFT;
 
 		/* Pattern-fill this fresh page */

bsd/netinet/tcp_lro.c

 
 	case TCP_LRO_COALESCE:
 		if ((payload_len != 0) && (unknown_tcpopts == 0) && 
-			(tcpflags == 0) && (ecn == 0) && (to.to_flags & TOF_TS)) { 
+			(tcpflags == 0) && (ecn != IPTOS_ECN_CE) && (to.to_flags & TOF_TS)) { 
 			tcp_lro_coalesce(flow_id, lro_mb, tcp_hdr, payload_len,
 				drop_hdrlen, &to, 
 				(to.to_flags & TOF_TS) ? (u_int32_t *)(void *)(optp + 4) : NULL,
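
The old test refused to coalesce whenever the ECN field was non-zero; the new one refuses only Congestion Experienced, so ECT-marked segments can still be coalesced. For reference, the two-bit ECN codepoints from the BSD <netinet/ip.h>:

    #include <netinet/ip.h>

    /* IPTOS_ECN_NOTECT 0x00  not ECN-capable
     * IPTOS_ECN_ECT1   0x01  ECN-capable transport (1)
     * IPTOS_ECN_ECT0   0x02  ECN-capable transport (0)
     * IPTOS_ECN_CE     0x03  congestion experienced
     */
    int can_coalesce_ecn(unsigned char tos)
    {
            return (tos & IPTOS_ECN_MASK) != IPTOS_ECN_CE;
    }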

bsd/netinet/tcp_output.c

 	u_int16_t	socket_id = get_socket_id(so);
 	int so_options = so->so_options;
 	struct rtentry *rt;
-	u_int32_t basertt, svc_flags = 0;
+	u_int32_t basertt, svc_flags = 0, allocated_len;
 	u_int32_t lro_ackmore = (tp->t_lropktlen != 0) ? 1 : 0;
 	struct mbuf *mnext = NULL;
 	int sackoptlen = 0;
 			goto send;
 		}
 		if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
-			tp->t_flags &= ~TF_MAXSEGSNT;
+			if (len >= tp->t_maxseg)
+				tp->t_flags |= TF_MAXSEGSNT;
+			else
+				tp->t_flags &= ~TF_MAXSEGSNT;
 			goto send;
 		}
 		if (sack_rxmit)
 		}
 	}
 
-/*#ifdef DIAGNOSTIC*/
-#if INET6
  	if (max_linkhdr + hdrlen > MCLBYTES)
 		panic("tcphdr too big");
-#else
- 	if (max_linkhdr + hdrlen > MHLEN)
-		panic("tcphdr too big");
-#endif
-/*#endif*/
 
 	/* Check if there is enough data in the send socket
 	 * buffer to start measuring bw 
 			tcpstat.tcps_sndrexmitpack++;
 			tcpstat.tcps_sndrexmitbyte += len;
 			if (nstat_collect) {
-				nstat_route_tx(tp->t_inpcb->inp_route.ro_rt, 1, len, NSTAT_TX_FLAG_RETRANSMIT);
+				nstat_route_tx(tp->t_inpcb->inp_route.ro_rt, 1, 
+					len, NSTAT_TX_FLAG_RETRANSMIT);
 				locked_add_64(&tp->t_inpcb->inp_stat->txpackets, 1);
 				locked_add_64(&tp->t_inpcb->inp_stat->txbytes, len);
 				tp->t_stat.txretransmitbytes += len;
 				locked_add_64(&tp->t_inpcb->inp_stat->txbytes, len);
 			}
 		}
-#ifdef notyet
-		if ((m = m_copypack(so->so_snd.sb_mb, off,
-		    (int)len, max_linkhdr + hdrlen)) == 0) {
-			error = ENOBUFS;
-			goto out;
-		}
-		/*
-		 * m_copypack left space for our hdr; use it.
-		 */
-		m->m_len += hdrlen;
-		m->m_data -= hdrlen;
-#else
 		/*
 		 * try to use the new interface that allocates all 
 		 * the necessary mbuf hdrs under 1 mbuf lock and 
 		 * data area (no cluster attached)
 		 */
 		m = NULL;
-#if INET6
+
+		/* minimum length we are going to allocate */
+		allocated_len = MHLEN;
  		if (MHLEN < hdrlen + max_linkhdr) {
-		        MGETHDR(m, M_DONTWAIT, MT_HEADER);	/* MAC-OK */
+			MGETHDR(m, M_DONTWAIT, MT_HEADER);
 			if (m == NULL) {
-			        error = ENOBUFS;
+				error = ENOBUFS;
 				goto out;
 			}
  			MCLGET(m, M_DONTWAIT);
  			}
 			m->m_data += max_linkhdr;
 			m->m_len = hdrlen;
+			allocated_len = MCLBYTES;
 		}
-#endif
-		if (len <= MHLEN - hdrlen - max_linkhdr) {
+		if (len <= allocated_len - hdrlen - max_linkhdr) {
 		        if (m == NULL) {
-			        MGETHDR(m, M_DONTWAIT, MT_HEADER);	/* MAC-OK */
+				VERIFY(allocated_len <= MHLEN);
+				MGETHDR(m, M_DONTWAIT, MT_HEADER);
 				if (m == NULL) {
-				        error = ENOBUFS;
+					error = ENOBUFS;
 					goto out;
 				}
 				m->m_data += max_linkhdr;
 				m->m_len = hdrlen;
 			}
 		}
-#endif
 		/*
 		 * If we're sending everything we've got, set PUSH.
 		 * (This will keep happy those implementations which only
 			error = ENOBUFS;
 			goto out;
 		}
-#if INET6
-		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
-		    MHLEN >= hdrlen) {
-			MH_ALIGN(m, hdrlen);
-		} else
-#endif
-			m->m_data += max_linkhdr;
+		if (MHLEN < (hdrlen + max_linkhdr)) {
+ 			MCLGET(m, M_DONTWAIT);
+ 			if ((m->m_flags & M_EXT) == 0) {
+ 				m_freem(m);
+ 				error = ENOBUFS;
+ 				goto out;
+ 			}
+		}
+		m->m_data += max_linkhdr;
 		m->m_len = hdrlen;
 	}
 	m->m_pkthdr.rcvif = 0;
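
The reworked allocation path boils down to: grab an mbuf header, and when the TCP/IP plus link headers cannot fit in MHLEN, attach a cluster and remember MCLBYTES as the usable length (which the later "len <= allocated_len - hdrlen - max_linkhdr" test relies on). A condensed sketch of that pattern, not the full path:

    MGETHDR(m, M_DONTWAIT, MT_HEADER);
    if (m == NULL) {
            error = ENOBUFS;
            goto out;
    }
    allocated_len = MHLEN;
    if (MHLEN < hdrlen + max_linkhdr) {
            MCLGET(m, M_DONTWAIT);      /* upgrade to a 2KB cluster */
            if ((m->m_flags & M_EXT) == 0) {
                    m_freem(m);
                    error = ENOBUFS;
                    goto out;
            }
            allocated_len = MCLBYTES;
    }
    m->m_data += max_linkhdr;           /* leave room for the link header */
    m->m_len = hdrlen;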

bsd/netinet/tcp_subr.c

 		_max_protohdr = TCP_MINPROTOHDR;
 		_max_protohdr = max_protohdr;	/* round it up */
 	}
-	if (max_linkhdr + max_protohdr > MHLEN)
+	if (max_linkhdr + max_protohdr > MCLBYTES)
 		panic("tcp_init");
 #undef TCP_MINPROTOHDR
 

bsd/netinet6/ip6_input.c

 	int i;
 	struct timeval tv;
 
+	_CASSERT((sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr)) <= 
+		_MHLEN);
+
 	PE_parse_boot_argn("net.inet6.ip6.scopedroute", &ip6_doscopedroute,
 	    sizeof (ip6_doscopedroute));
 
 {
 
 	int i;
-	
+
+	_CASSERT(PFKEY_ALIGN8(sizeof(struct sadb_msg)) <= _MHLEN);
+
 	sadb_mutex_grp_attr = lck_grp_attr_alloc_init();
 	sadb_mutex_grp = lck_grp_alloc_init("sadb", sadb_mutex_grp_attr);
 	sadb_mutex_attr = lck_attr_alloc_init();
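
Both added _CASSERT lines are compile-time assertions: if a header change ever breaks the size assumption, the build fails rather than a running system. A hypothetical stand-in showing the usual negative-array-size trick (the same trick the io_pagealloc_t_assert typedef uses later in this commit):

    /* A negative array size is a compile error, so the build breaks
     * whenever the condition is false. */
    #define MY_CASSERT(expr) \
            typedef char my_cassert_failed[(expr) ? 1 : -1]

    MY_CASSERT(sizeof(int) == 4);       /* compiles */
    /* MY_CASSERT(sizeof(int) == 8);       would fail to compile */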

bsd/sys/cprotect.h

 #define CP_READ_ACCESS 	0x1
 #define CP_WRITE_ACCESS 0x2
 
+/* 
+ * Check for this version when deciding to enable features
+ */
 #define CONTENT_PROTECTION_XATTR_NAME	"com.apple.system.cprotect"
 #define CP_NEW_MAJOR_VERS 4
 #define CP_PREV_MAJOR_VERS 2
 #define	NOTE_PCTRLMASK	(~NOTE_PDATAMASK)
 
 /*
+ * If NOTE_EXITSTATUS is present, provide additional info about the exiting process.
+ */
+#define NOTE_EXIT_REPARENTED	0x00080000	/* exited while reparented */
+
+/*
  * data/hint fflags for EVFILT_VM, shared with userspace.
  */
 #define NOTE_VM_PRESSURE			0x80000000              /* will react on memory pressure */

bsd/vfs/vfs_fsevents.c

     uint32_t     num_dropped;
     uint64_t     max_event_id;
     struct fsevent_handle *fseh;
+    pid_t        pid;
+    char         proc_name[(2 * MAXCOMLEN) + 1];
 } fs_event_watcher;
 
 // fs_event_watcher flags
 #define WATCHER_CLOSING                0x0002
 #define WATCHER_WANTS_COMPACT_EVENTS   0x0004
 #define WATCHER_WANTS_EXTENDED_INFO    0x0008
-
+#define WATCHER_APPLE_SYSTEM_SERVICE   0x0010   // fseventsd, coreservicesd, mds
 
 #define MAX_WATCHERS  8
 static fs_event_watcher *watcher_table[MAX_WATCHERS];
     int (*)(const void *, const void *));
 
 
+
+/* From kdp_udp.c + user mode Libc - this ought to be in a library */
+static char *
+strnstr(char *s, const char *find, size_t slen)
+{
+  char c, sc;
+  size_t len;
+  
+  if ((c = *find++) != '\0') {
+    len = strlen(find);
+    do {
+      do {
+        if ((sc = *s++) == '\0' || slen-- < 1)
+          return (NULL);
+      } while (sc != c);
+      if (len > slen)
+        return (NULL);
+    } while (strncmp(s, find, len) != 0);
+    s--;
+  }
+  return (s);
+}
+
+static int
+is_ignored_directory(const char *path) {
+
+    if (!path) {
+      return 0;
+    }
+
+#define IS_TLD(x) strnstr((char *) path, x, MAXPATHLEN) 
+    if (IS_TLD("/.Spotlight-V100/") ||
+        IS_TLD("/.MobileBackups/") || 
+        IS_TLD("/Backups.backupdb/")) {
+        return 1;
+    }
+#undef IS_TLD
+    
+    return 0;
+}
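
For example (paths illustrative), any path containing one of the listed components matches:

    is_ignored_directory("/Volumes/TM/Backups.backupdb/mac/f");  /* -> 1 */
    is_ignored_directory("/Users/anna/Documents/f");             /* -> 0 */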
+
 static void
 fsevents_internal_init(void)
 {
     return 1;
 }
 
-static int
-prefix_match_len(const char *str1, const char *str2)
-{
-    int len=0;
-
-    while(*str1 && *str2 && *str1 == *str2) {
-	len++;
-	str1++;
-	str2++;
-    }
-
-    if (*str1 == '\0' && *str2 == '\0') {
-	len++;
-    }
-
-    return len;
-}
-
-
-struct history_item {
-    kfs_event *kfse;
-    kfs_event *oldest_kfse;
-    int        counter;
-};
-
-static int
-compare_history_items(const void *_a, const void *_b)
-{
-    const struct history_item *a = (const struct history_item *)_a;
-    const struct history_item *b = (const struct history_item *)_b;
-
-    // we want a descending order
-    return (b->counter - a->counter);
-}
 
 #define is_throw_away(x)  ((x) == FSE_STAT_CHANGED || (x) == FSE_CONTENT_MODIFIED)
 
 #define KFSE_RECYCLED   0x0004
 
 int num_dropped         = 0;
-int num_combined_events = 0;
-int num_added_to_parent = 0;
 int num_parent_switch   = 0;
 int num_recycled_rename = 0;
 
-//
-// NOTE: you must call lock_fs_event_list() before calling
-//       this function.
-//
-static kfs_event *
-find_an_event(const char *str, int len, kfs_event *do_not_reuse, int *reuse_type, int *longest_match_len)
-{
-    kfs_event *kfse, *best_kfse=NULL;
-
-// this seems to be enough to find most duplicate events for the same vnode
-#define MAX_HISTORY  12 
-    struct history_item history[MAX_HISTORY];
-    int           i;
-
-    *longest_match_len = 0;
-    *reuse_type = 0;
-    
-    memset(history, 0, sizeof(history));
-
-    //
-    // now walk the list of events and try to find the best match
-    // for this event.  if we have a vnode, we look for an event
-    // that already references the vnode.  if we don't find one
-    // we'll also take the parent of this vnode (in which case it
-    // will be marked as having dropped events within it).
-    //
-    // if we have a string we look for the longest match on the
-    // path we have.
-    //
-
-    LIST_FOREACH(kfse, &kfse_list_head, kevent_list) {
-	int match_len;
-
-	//
-	// don't look at events that are still in the process of being
-	// created, have a null vnode ptr or rename/exchange events.
-	//
-	if (   (kfse->flags & KFSE_BEING_CREATED) || kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) {
-
-	    continue;
-	}
-	
-	if (str != NULL) {
-	    if (kfse->len != 0 && kfse->str != NULL) {
-		match_len = prefix_match_len(str, kfse->str);
-		if (match_len > *longest_match_len) {
-		    best_kfse = kfse;
-		    *longest_match_len = match_len;
-		}
-	    }
-	}
-
-	if (kfse == do_not_reuse) {
-	    continue;
-	}
-
-	for(i=0; i < MAX_HISTORY; i++) {
-	    if (history[i].kfse == NULL) {
-		break;
-	    }
-
-	    //
-	    // do a quick check to see if we've got two simple events
-	    // that we can cheaply combine.  if the event we're looking
-	    // at and one of the events in the history table are for the
-	    // same path then we'll just mark the newer event as combined
-	    // and recyle the older event.
-	    //
-	    if (history[i].kfse->str == kfse->str) {
-
-		OSBitOrAtomic16(KFSE_COMBINED_EVENTS, &kfse->flags);
-		*reuse_type = KFSE_RECYCLED;
-		history[i].kfse->flags |= KFSE_RECYCLED_EVENT;
-		return history[i].kfse;
-	    }
-	}
-
-	if (i < MAX_HISTORY && history[i].kfse == NULL) {
-	    history[i].kfse = kfse;
-	    history[i].counter = 1;
-	} else if (i >= MAX_HISTORY) {
-	    qsort(history, MAX_HISTORY, sizeof(struct history_item), compare_history_items);
-
-	    // pluck off the lowest guy if he's only got a count of 1
-	    if (history[MAX_HISTORY-1].counter == 1) {
-		history[MAX_HISTORY-1].kfse = kfse;
-	    }
-	}
-    }
-
-    
-    if (str != NULL && best_kfse) {
-	if (*longest_match_len <= 1) {
-	    // if the best match we had was "/" then basically we're toast...
-	    *longest_match_len = 0;
-	    best_kfse = NULL;
-	} else if (*longest_match_len != len) {
-	    OSBitOrAtomic16(KFSE_CONTAINS_DROPPED_EVENTS, &best_kfse->flags);
-	    *reuse_type = KFSE_COLLAPSED;
-	} else {
-	    OSBitOrAtomic16(KFSE_COMBINED_EVENTS, &best_kfse->flags);
-	    *reuse_type = KFSE_COMBINED;
-	}
-    }
-
-    return best_kfse;
-}
-
-
 static struct timeval last_print;
 
 //
 add_fsevent(int type, vfs_context_t ctx, ...) 
 {
     struct proc	     *p = vfs_context_proc(ctx);
-    int               i, arg_type, skip_init=0, longest_match_len, ret;
+    int               i, arg_type, ret;
     kfs_event        *kfse, *kfse_dest=NULL, *cur;
     fs_event_watcher *watcher;
     va_list           ap;
     int 	      error = 0, did_alloc=0, need_event_unlock = 0;
     dev_t             dev = 0;
     uint64_t          now, elapsed;
-    int               reuse_type = 0;
     char             *pathbuff=NULL;
     int               pathbuff_len;
 
 
 
     if (kfse == NULL) {        // yikes! no free events
-	int len=0;
-	char *str;
-	
-	//
-	// Figure out what kind of reference we have to the
-	// file in this event.  This helps us find an event
-	// to combine/collapse into to make room.
-	//
-	// If we have a rename or exchange event then we
-	// don't want to go through the normal path, we
-	// want to "steal" an event instead (which is what
-	// find_an_event() will do if str is null).
-	//
-	arg_type = va_arg(ap, int32_t);
-	if (type == FSE_RENAME || type == FSE_EXCHANGE) {
-	    str = NULL;
-	} else if (arg_type == FSE_ARG_STRING) {
-	    len = va_arg(ap, int32_t);
-	    str = va_arg(ap, char *);
-	} else if (arg_type == FSE_ARG_VNODE) {
-	    struct vnode *vp;
-
-	    vp  = va_arg(ap, struct vnode *);
-	    pathbuff = get_pathbuff();
-	    pathbuff_len = MAXPATHLEN;
-	    if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
-		release_pathbuff(pathbuff);
-		pathbuff = NULL;
-	    }
-	    str = pathbuff;
-	} else {
-	    str = NULL;
-	}
-
-	//
-	// This will go through all events and find one that we
-        // can combine with (hopefully), or "collapse" into (i.e
-	// it has the same parent) or in the worst case we have
-	// to "recycle" an event which means that it will combine
-	// two other events and return us the now unused event.
-	// failing all that, find_an_event() could still return
-	// null and if it does then we have a catastrophic dropped
-	// events scenario.
-	//
-	kfse = find_an_event(str, len, NULL, &reuse_type, &longest_match_len);
-
-	if (kfse == NULL) {
-	  bail_early:
-	    
 	    unlock_fs_event_list();
 	    lock_watch_table();
 
 			    continue;
 			}
 			
-			printf("add_fsevent: watcher %p: num dropped %d rd %4d wr %4d q_size %4d flags 0x%x\n",
-			    watcher_table[ii], watcher_table[ii]->num_dropped,
-			    watcher_table[ii]->rd, watcher_table[ii]->wr,
-			    watcher_table[ii]->eventq_size, watcher_table[ii]->flags);
+			printf("add_fsevent: watcher %s %p: rd %4d wr %4d q_size %4d flags 0x%x\n",
+			       watcher_table[ii]->proc_name,
+			       watcher_table[ii],
+			       watcher_table[ii]->rd, watcher_table[ii]->wr,
+			       watcher_table[ii]->eventq_size, watcher_table[ii]->flags);
 		    }
 
 		    last_print = current_tv;
 		release_pathbuff(pathbuff);
 		pathbuff = NULL;
 	    }
-
 	    return ENOSPC;
 	}
 
-	if ((type == FSE_RENAME || type == FSE_EXCHANGE) && reuse_type != KFSE_RECYCLED) {
-	    panic("add_fsevent: type == %d but reuse type == %d!\n", type, reuse_type);
-	} else if ((kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) && kfse->dest == NULL) {
-	    panic("add_fsevent: bogus kfse %p (type %d, but dest is NULL)\n", kfse, kfse->type);
-	} else if (kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) {
-	    panic("add_fsevent: we should never re-use rename events (kfse %p reuse type %d)!\n", kfse, reuse_type);
-	}
-
-	if (reuse_type == KFSE_COLLAPSED) {
-	    if (str) {
-		const char *tmp_ptr, *new_str;
-		
-		//
-		// if we collapsed and have a string we have to chop off the
-		// tail component of the pathname to get the parent.
-		//
-		// NOTE: it is VERY IMPORTANT that we leave the trailing slash
-		//       on the pathname.  user-level code depends on this.
-		//
-		if (str[0] == '\0' || longest_match_len <= 1) {
-		    printf("add_fsevent: strange state (str %s / longest_match_len %d)\n", str, longest_match_len);
-		    if (longest_match_len < 0) {
-			panic("add_fsevent: longest_match_len %d\n", longest_match_len);
-		    }
-		}
-		// chop off the tail component if it's not the
-		// first character...
-		if (longest_match_len > 1) {
-		    str[longest_match_len] = '\0';
-		} else if (longest_match_len == 0) {
-		    longest_match_len = 1;
-		}
-
-		new_str = vfs_addname(str, longest_match_len, 0, 0);
-		if (new_str == NULL || new_str[0] == '\0') {
-		    panic("add_fsevent: longest match is strange (new_str %p).\n", new_str);
-		}
-		
-		lck_rw_lock_exclusive(&event_handling_lock);
-
-		kfse->len      = longest_match_len;
-		tmp_ptr        = kfse->str;
-		kfse->str = new_str;
-		kfse->ino      = 0;
-		kfse->mode     = 0;
-		kfse->uid      = 0;
-		kfse->gid      = 0;
-		
-		lck_rw_unlock_exclusive(&event_handling_lock);
-		
-		vfs_removename(tmp_ptr);
-	    } else {
-		panic("add_fsevent: don't have a vnode or a string pointer (kfse %p)\n", kfse);
-	    }
-	}
-
-	if (reuse_type == KFSE_RECYCLED && (type == FSE_RENAME || type == FSE_EXCHANGE)) {
-	    
-	    // if we're recycling this kfse and we have a rename or
-	    // exchange event then we need to also get an event for
-	    // kfse_dest. 
-	    //
-	    if (did_alloc) {
-		// only happens if we allocated one but then failed
-		// for kfse_dest (and thus free'd the first one we
-		// allocated)
-		kfse_dest = zalloc_noblock(event_zone);
-		if (kfse_dest != NULL) {
-		    memset(kfse_dest, 0, sizeof(kfs_event));
-		    kfse_dest->refcount = 1;
-		    OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
-		} else {
-		    did_alloc = 0;
-		}
-	    }
-
-	    if (kfse_dest == NULL) {
-		int dest_reuse_type, dest_match_len;
-		
-		kfse_dest = find_an_event(NULL, 0, kfse, &dest_reuse_type, &dest_match_len);
-		
-		if (kfse_dest == NULL) {
-		    // nothing we can do... gotta bail out
-		    goto bail_early;
-		}
-
-		if (dest_reuse_type != KFSE_RECYCLED) {
-		    panic("add_fsevent: type == %d but dest_reuse type == %d!\n", type, dest_reuse_type);
-		}
-	    }
-	}
-
-
-	//
-	// Here we check for some fast-path cases so that we can
-	// jump over the normal initialization and just get on
-	// with delivering the event.  These cases are when we're
-	// combining/collapsing an event and so basically there is
-	// no more work to do (aside from a little book-keeping)
-	//
-	if (str && kfse->len != 0) {
-	    kfse->abstime = now;
-	    OSAddAtomic(1, &kfse->refcount);
-	    skip_init = 1;
-
-	    if (reuse_type == KFSE_COMBINED) {
-		num_combined_events++;
-	    } else if (reuse_type == KFSE_COLLAPSED) {
-		num_added_to_parent++;
-	    }
-	} else if (reuse_type != KFSE_RECYCLED) {
-	    panic("add_fsevent: I'm so confused! (reuse_type %d str %p kfse->len %d)\n",
-		  reuse_type, str, kfse->len);
-	}
-
-	va_end(ap);
-
-
-	if (skip_init) {
-	    if (kfse->refcount < 1) {
-		panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount);
-	    }
-
-	    last_event_ptr = kfse;
-	    unlock_fs_event_list();
-	    goto normal_delivery;
-	    
-	} else if (reuse_type == KFSE_RECYCLED || reuse_type == KFSE_COMBINED) {
-
-	    //
-	    // If we're here we have to clear out the kfs_event(s)
-	    // that we were given by find_an_event() and set it
-	    // up to be re-filled in by the normal code path.
-	    //
-	    va_start(ap, ctx);
-
-	    need_event_unlock = 1;
-	    lck_rw_lock_exclusive(&event_handling_lock);
-
-	    OSAddAtomic(1, &kfse->refcount);
-
-	    if (kfse->refcount < 1) {
-		panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount);
-	    }
-
-	    if (kfse->len == 0) {
-		panic("%s:%d: no more fref.vp\n", __FILE__, __LINE__);
-		// vnode_rele_ext(kfse->fref.vp, O_EVTONLY, 0);
-	    } else {
-		vfs_removename(kfse->str);
-		kfse->len = 0;
-	    }
-	    kfse->str = NULL;
-
-	    if (kfse->kevent_list.le_prev != NULL) {
-		num_events_outstanding--;
-		if (kfse->type == FSE_RENAME) {
-		    num_pending_rename--;
-		}
-		LIST_REMOVE(kfse, kevent_list);
-		memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list));
-	    }
-
-	    kfse->flags = 0 | KFSE_RECYCLED_EVENT;
-	    
-	    if (kfse_dest) {
-		OSAddAtomic(1, &kfse_dest->refcount);
-		kfse_dest->flags = 0 | KFSE_RECYCLED_EVENT;
-
-		if (did_alloc == 0) {
-		    if (kfse_dest->len == 0) {
-			panic("%s:%d: no more fref.vp\n", __FILE__, __LINE__);
-			// vnode_rele_ext(kfse_dest->fref.vp, O_EVTONLY, 0);
-		    } else {
-			vfs_removename(kfse_dest->str);
-			kfse_dest->len = 0;
-		    }
-		    kfse_dest->str = NULL;
-
-		    if (kfse_dest->kevent_list.le_prev != NULL) {
-			num_events_outstanding--;
-			LIST_REMOVE(kfse_dest, kevent_list);
-			memset(&kfse_dest->kevent_list, 0, sizeof(kfse_dest->kevent_list));
-		    }
-
-		    if (kfse_dest->dest) {
-			panic("add_fsevent: should never recycle a rename event! kfse %p\n", kfse);
-		    }
-		}
-	    }
-
-	    OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);
-	    if (kfse_dest) {
-		OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
-	    }
-
-	    goto process_normally;
-	}
-    }
-
-    if (reuse_type != 0) {
-	panic("fsevents: we have a reuse_type (%d) but are about to clear out kfse %p\n", reuse_type, kfse);
-    }
-
-    //
-    // we only want to do this for brand new events, not
-    // events which have been recycled.
-    //
     memset(kfse, 0, sizeof(kfs_event));
     kfse->refcount = 1;
     OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);
 
-  process_normally:
     last_event_ptr = kfse;
     kfse->type     = type;
     kfse->abstime  = now;
 	lck_rw_unlock_shared(&event_handling_lock);
     }
     
-  normal_delivery:
     // unlock this here so we don't hold it across the
     // event delivery loop.
     if (need_event_unlock) {
     }
 }
 
-
 static int
 add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out, void *fseh)
 {
     watcher->num_readers  = 0;
     watcher->max_event_id = 0;
     watcher->fseh         = fseh;
+    watcher->pid          = proc_selfpid();
+    proc_selfname(watcher->proc_name, sizeof(watcher->proc_name));
 
     watcher->num_dropped  = 0;      // XXXdbg - debugging
 
+    if (!strncmp(watcher->proc_name, "fseventsd", sizeof(watcher->proc_name)) ||
+	!strncmp(watcher->proc_name, "coreservicesd", sizeof(watcher->proc_name)) ||
+	!strncmp(watcher->proc_name, "mds", sizeof(watcher->proc_name))) {
+	watcher->flags |= WATCHER_APPLE_SYSTEM_SERVICE;
+    }
+
     lock_watch_table();
 
     // now update the global list of who's interested in
 	unlock_watch_table();
 	    
 	while (watcher->num_readers > 1 && counter++ < 5000) {
+	    lock_watch_table();
 	    fsevents_wakeup(watcher);      // in case they're asleep
+	    unlock_watch_table();
 	    
 	    tsleep(watcher, PRIBIO, "fsevents-close", 1);
 	}
 	}
 
 	// drain the event_queue 
+
 	while(watcher->rd != watcher->wr) {
-	    lck_rw_lock_shared(&event_handling_lock);
-
+	    lck_rw_lock_exclusive(&event_handling_lock);
 	    kfse = watcher->event_queue[watcher->rd];
-	    if (kfse->type == FSE_INVALID || kfse->refcount < 1) {
-		panic("remove_watcher: bogus kfse %p during cleanup (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr);
+	    if (!kfse || kfse->type == FSE_INVALID || kfse->refcount < 1) {
+		lck_rw_unlock_exclusive(&event_handling_lock);
+		break;
 	    }
-
-	    lck_rw_unlock_shared(&event_handling_lock);
-	    
+	    watcher->event_queue[watcher->rd] = NULL;
 	    watcher->rd = (watcher->rd+1) % watcher->eventq_size;
-
+	    OSSynchronizeIO();
 	    if (kfse != NULL) {
 		release_event_ref(kfse);
 	    }
+	    lck_rw_unlock_exclusive(&event_handling_lock);
 	}
+
 	    
 	if (watcher->event_list) {
 	    FREE(watcher->event_list, M_TEMP);
     // send any pending events if no more are received in the next 
     // EVENT_DELAY_IN_MS milli-seconds.
     //
-    if (   (watcher->rd < watcher->wr && (watcher->wr - watcher->rd) > MAX_NUM_PENDING)
-	|| (watcher->rd > watcher->wr && (watcher->wr + watcher->eventq_size - watcher->rd) > MAX_NUM_PENDING)) {
+	int32_t num_pending = 0;
+	if (watcher->rd < watcher->wr) {
+		num_pending = watcher->wr - watcher->rd;
+	}
 
-	fsevents_wakeup(watcher);
+	if (watcher->rd > watcher->wr) {
+		num_pending = watcher->wr + watcher->eventq_size - watcher->rd;
+	}
 
-    } else if (timer_set == 0) {
+	if (num_pending > (watcher->eventq_size*3/4) && !(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE)) {
+	    /* Non-Apple Service is falling behind, start dropping events for this process */
 
-	schedule_event_wakeup();
-    }
-
-    return 0;
+	    lck_rw_lock_exclusive(&event_handling_lock);	    
+	    while (watcher->rd != watcher->wr) {
+	      kfse = watcher->event_queue[watcher->rd];
+	      if (!kfse || kfse->type == FSE_INVALID || kfse->refcount < 1) {
+		  lck_rw_unlock_exclusive(&event_handling_lock);
+		  break;
+	      }
+	      watcher->event_queue[watcher->rd] = NULL;		
+	      watcher->rd = (watcher->rd+1) % watcher->eventq_size;
+	      OSSynchronizeIO();
+	      if (kfse != NULL) {
+		release_event_ref(kfse);
+	      }
+	    }
+	    lck_rw_unlock_exclusive(&event_handling_lock);
+	    
+	    printf("fsevents: watcher falling behind: %s (pid: %d) rd: %4d wr: %4d q_size: %4d flags: 0x%x\n",
+		   watcher->proc_name, watcher->pid, watcher->rd, watcher->wr,
+		   watcher->eventq_size, watcher->flags);
+	    
+	    watcher->flags |= WATCHER_DROPPED_EVENTS;
+	    fsevents_wakeup(watcher);
+	} else if (num_pending > MAX_NUM_PENDING) {
+	    fsevents_wakeup(watcher);
+	} else if (timer_set == 0) {
+	    schedule_event_wakeup();
+	}
+	
+	return 0;
 }
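
The two branches above are the usual ring-buffer occupancy computation, split by whether the write index has wrapped around; equivalently:

    /* occupancy of a ring buffer with read index rd and write index wr */
    num_pending = (watcher->wr - watcher->rd + watcher->eventq_size)
                      % watcher->eventq_size;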
 
 static int
     user_ssize_t      last_full_event_resid;
     kfs_event        *kfse;
     uint16_t          tmp16;
+    int               skipped;
 
     last_full_event_resid = uio_resid(uio);
 
 	return EAGAIN;
     }
 
+ restart_watch:
     if (watcher->rd == watcher->wr) {
 	if (watcher->flags & WATCHER_CLOSING) {
 	    OSAddAtomic(-1, &watcher->num_readers);
 	watcher->flags &= ~WATCHER_DROPPED_EVENTS;
     }
 
+    skipped = 0;
     while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) {
 	if (watcher->flags & WATCHER_CLOSING) {
 	    break;
 	lck_rw_lock_shared(&event_handling_lock);
 
 	kfse = watcher->event_queue[watcher->rd];
-	if (kfse->type == FSE_INVALID || kfse->refcount < 1) {
-	    panic("fmod_watch: someone left me a bogus kfse %p (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr);
+	if (!kfse || kfse->type == FSE_INVALID || kfse->refcount < 1) {
+	  lck_rw_unlock_shared(&event_handling_lock);
+	  break;
 	}
 
 	if (watcher->event_list[kfse->type] == FSE_REPORT && watcher_cares_about_dev(watcher, kfse->dev)) {
 
+	  if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) && is_ignored_directory(kfse->str)) {
+	    // If this is not an Apple System Service, skip specified directories
+	    // radar://12034844
+	    error = 0;
+	    skipped = 1;
+	  } else {
+
+	    skipped = 0;
 	    if (last_event_ptr == kfse) {
 		last_event_ptr = NULL;
 		last_event_type = -1;
 	    }
 
 	    last_full_event_resid = uio_resid(uio);
+	  }
 	}
 
-	lck_rw_unlock_shared(&event_handling_lock);
-
 	watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
 	OSSynchronizeIO();
-	    
-	if (kfse->type == FSE_INVALID || kfse->refcount < 1) {
-	    panic("fmod_watch:2: my kfse became bogus! kfse %p (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr);
-	}
+	release_event_ref(kfse);
 
-	release_event_ref(kfse);
+	lck_rw_unlock_shared(&event_handling_lock);
+    }
+
+    if (skipped && error == 0) {
+      goto restart_watch;
     }
 
   get_out:
         // and decision to tsleep in fmod_watch... this bit of 
         // latency is a decent tradeoff against not having to
         // take and drop a lock in fmod_watch
+	lock_watch_table();
 	fsevents_wakeup(fseh->watcher);
+	unlock_watch_table();
 
 	tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
     }

bsd/vm/vnode_pager.c

 	int           		flags,
 	int 			*errorp)
 {
-        struct uthread	*ut;
         upl_page_info_t *pl;
 	int	        result = PAGER_SUCCESS;
 	int		error = 0;
 	if (errorp)
 		*errorp = result;
 
-	ut = get_bsdthread_info(current_thread());
-
-	if (ut->uu_lowpri_window) {
-	        /*
-		 * task is marked as a low priority I/O type
-		 * and the I/O we issued while in this page fault
-		 * collided with normal I/O operations... we'll
-		 * delay in order to mitigate the impact of this
-		 * task on the normal operation of the system
-		 */
-		throttle_lowpri_io(TRUE);
-	}
 	return (error);
 }
 

config/IOKit.exports

 __ZN8IOMapper17setMapperRequiredEb
 __ZN8IOMapper19copyMapperForDeviceEP9IOService
 __ZN8IOMapper19waitForSystemMapperEv
+__ZN8IOMapper13iovmMapMemoryEP8OSObjectjjjP13upl_page_infoPK21IODMAMapSpecification
 __ZN8IOMapper4freeEv
 __ZN8IOMapper5startEP9IOService
 __ZN8IOMapper7gSystemE
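
Decoded (e.g. with c++filt, after dropping the extra Mach-O leading underscore), the newly exported symbol corresponds to the method added to iokit/IOKit/IOMapper.h below:

    IOMapper::iovmMapMemory(OSObject*, unsigned int, unsigned int,
        unsigned int, upl_page_info*, IODMAMapSpecification const*)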

config/IOKit.i386.exports

 __ZN8IOMapper10iovmInsertEjmPjm
 __ZN8IOMapper11NewARTTableEmPPvPj
 __ZN8IOMapper12FreeARTTableEP6OSDatam
-__ZN8IOMapper18_RESERVEDIOMapper3Ev
 __ZN8IOMapper18_RESERVEDIOMapper4Ev
 __ZN8IOMapper18_RESERVEDIOMapper5Ev
 __ZN8IOMapper18_RESERVEDIOMapper6Ev

config/IOKit.x86_64.exports

 __ZN8IOMapper10iovmInsertEjjPjj
 __ZN8IOMapper11NewARTTableEyPPvPj
 __ZN8IOMapper12FreeARTTableEP6OSDatay
-__ZN8IOMapper18_RESERVEDIOMapper3Ev
 __ZN8IOMapper18_RESERVEDIOMapper4Ev
 __ZN8IOMapper18_RESERVEDIOMapper5Ev
 __ZN8IOMapper18_RESERVEDIOMapper6Ev

config/MasterVersion

-12.1.0
+12.2.0
 
 # The first line of this file contains the master version number for the kernel.
 # All other instances of the kernel version in xnu are derived from this file.

config/Private.exports

 _net_del_domain
 _net_del_proto
 _netboot_root
-_perf_monitor_register
+_perf_monitor_register_*
 _perf_monitor_unregister
 _pffinddomain
 _pffindproto

config/System6.0.exports

 __ZN8IOMapper11NewARTTableEmPPvPj
 __ZN8IOMapper12FreeARTTableEP6OSDatam
 __ZN8IOMapper17setMapperRequiredEb
-__ZN8IOMapper18_RESERVEDIOMapper3Ev
 __ZN8IOMapper18_RESERVEDIOMapper4Ev
 __ZN8IOMapper18_RESERVEDIOMapper5Ev
 __ZN8IOMapper18_RESERVEDIOMapper6Ev

config/Unsupported.exports

 _NDR_record
 _PE_kputc
 __Z22OSFlushObjectTrackListv
+__ZN11IOMemoryMap9wireRangeEjyy
 __ZN15IOWatchDogTimer10gMetaClassE
 __ZN15IOWatchDogTimer10superClassE
 __ZN15IOWatchDogTimer13setPropertiesEP8OSObject
 _mig_user_deallocate
 _ml_io_map
 _ml_phys_read
+_ml_phys_read_byte_64
+_ml_phys_read_double_64
+_ml_phys_read_half_64
+_ml_phys_read_word_64
 _ml_phys_write
+_ml_phys_write_byte_64
+_ml_phys_write_double_64
+_ml_phys_write_half_64
+_ml_phys_write_word_64
 _ml_probe_read
 _ml_processor_register
 _ml_thread_policy

iokit/IOKit/IOBufferMemoryDescriptor.h

     kIOMemoryPhysicallyContiguous	= 0x00000010,
     kIOMemoryPageable	      		= 0x00000020,
     kIOMemoryPurgeable	      		= 0x00000040,
+    kIOMemoryHostPhysicallyContiguous  	= 0x00000080,
     kIOMemorySharingTypeMask		= 0x000f0000,
     kIOMemoryUnshared			= 0x00000000,
     kIOMemoryKernelUserShared		= 0x00010000,
 #endif
 					| kIOMemoryThreadSafe
 					| kIOMemoryClearEncrypt
+					| kIOMemoryMapperNone
 };
 
-#define _IOBUFFERMEMORYDESCRIPTOR_INTASKWITHOPTIONS_	1
+#define _IOBUFFERMEMORYDESCRIPTOR_INTASKWITHOPTIONS_		1
+#define _IOBUFFERMEMORYDESCRIPTOR_HOSTPHYSICALLYCONTIGUOUS_	1
 /*!
     @class IOBufferMemoryDescriptor
     @abstract Provides a simple memory descriptor that allocates its own buffer memory.

iokit/IOKit/IOHibernatePrivate.h

 IOReturn IOHibernateSystemPostWake(void);
 bool     IOHibernateWasScreenLocked(void);
 void     IOHibernateSetScreenLocked(uint32_t lockState);
+void     IOHibernateSystemRestart(void);
 
 #endif /* __cplusplus */
 
                         boolean_t * encryptedswap);
 kern_return_t 
 hibernate_teardown(hibernate_page_list_t * page_list,
-                    hibernate_page_list_t * page_list_wired);
+                    hibernate_page_list_t * page_list_wired,
+                    hibernate_page_list_t * page_list_pal);
 
 kern_return_t 
 hibernate_processor_setup(IOHibernateImageHeader * header);
 #define kIOHibernateRTCVariablesKey	"IOHibernateRTCVariables"
 #define kIOHibernateSMCVariablesKey	"IOHibernateSMCVariables"
 
-#define kIOHibernateBootSwitchVarsKey			"boot-switch-vars"
+#define kIOHibernateBootSwitchVarsKey	"boot-switch-vars"
+
+#define kIOHibernateBootNoteKey		"boot-note"
+
 
 #define kIOHibernateUseKernelInterpreter    0x80000000
 

iokit/IOKit/IOMapper.h

 
 #include <IOKit/IOService.h>
 #include <IOKit/IOMemoryDescriptor.h>
+#include <IOKit/IODMACommand.h>
 
 class OSData;
-class IODMACommand;
 
 extern const OSSymbol * gIOMapperIDKey;
 
     virtual ppnum_t iovmAllocDMACommand(IODMACommand * command, IOItemCount pageCount);
     virtual void iovmFreeDMACommand(IODMACommand * command, ppnum_t addr, IOItemCount pageCount);
     
+    virtual ppnum_t iovmMapMemory(
+    			  OSObject                    * memory,   // dma command or iomd
+			  ppnum_t                       offsetPage,
+			  ppnum_t                       pageCount,
+			  uint32_t                      options,
+			  upl_page_info_t             * pageList,
+			  const IODMAMapSpecification * mapSpecification);
+
     OSMetaClassDeclareReservedUsed(IOMapper, 0);
     OSMetaClassDeclareReservedUsed(IOMapper, 1);
     OSMetaClassDeclareReservedUsed(IOMapper, 2);
+    OSMetaClassDeclareReservedUsed(IOMapper, 3);
 
 private:
-    OSMetaClassDeclareReservedUnused(IOMapper, 3);
     OSMetaClassDeclareReservedUnused(IOMapper, 4);
     OSMetaClassDeclareReservedUnused(IOMapper, 5);
     OSMetaClassDeclareReservedUnused(IOMapper, 6);

iokit/IOKit/IOMemoryDescriptor.h

 
 class IOMemoryMap;
 class IOMapper;
+class IOService;
 
 /*
  * Direction of transfer, with respect to the described memory.
 
     kIOMemoryAsReference	= 0x00000100,
     kIOMemoryBufferPageable	= 0x00000400,
-    kIOMemoryMapperNone		= 0x00000800,
+    kIOMemoryMapperNone		= 0x00000800,	// Shared with Buffer MD
+    kIOMemoryHostOnly           = 0x00001000,   // Never DMA accessible
 #ifdef XNU_KERNEL_PRIVATE
     kIOMemoryRedirected		= 0x00004000,
     kIOMemoryPreparedReadOnly	= 0x00008000,
 
 #define	IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND	1
 
+struct IODMAMapSpecification
+{
+	uint64_t    alignment;
+	IOService * device;
+	uint32_t    options;
+	uint8_t     numAddressBits;
+	uint8_t     resvA[3];
+	uint32_t    resvB[4];
+};
+
+enum
+{
+    kIODMAMapWriteAccess          = 0x00000002,
+    kIODMAMapPhysicallyContiguous = 0x00000010,
+    kIODMAMapDeviceMemory         = 0x00000020,
+    kIODMAMapPagingPath           = 0x00000040,
+    kIODMAMapIdentityMap          = 0x00000080,
+};
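
As a hypothetical sketch of how a driver might fill the new specification for a DMA engine limited to 32-bit addressing (field values illustrative, not taken from this commit):

    struct IODMAMapSpecification spec;

    bzero(&spec, sizeof(spec));
    spec.numAddressBits = 32;           /* device can address only 4GB */
    spec.alignment      = 4096;         /* require page-aligned I/O addresses */
    spec.options        = kIODMAMapWriteAccess;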
+
+
 enum 
 {
     kIOPreparationIDUnprepared = 0,
 
 #ifdef XNU_KERNEL_PRIVATE
     IOMemoryDescriptorReserved * getKernelReserved( void );
+    IOReturn dmaMap(
+	IOMapper                    * mapper,
+	const IODMAMapSpecification * mapSpec,
+	uint64_t                      offset,
+	uint64_t                      length,
+	uint64_t                    * address,
+	ppnum_t                     * mapPages);
 #endif
 	
 private:
     IOReturn userClientUnmap();
 #endif /* XNU_KERNEL_PRIVATE */
 
+    IOReturn wireRange(
+    	uint32_t		options,
+        mach_vm_size_t		offset,
+        mach_vm_size_t		length);
+
     OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
     OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
     OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
 
     virtual uint64_t getPreparationID( void );
 
+#ifdef XNU_KERNEL_PRIVATE
+    // Internal APIs may be made virtual at some time in the future.
+    IOReturn wireVirtual(IODirection forDirection);
+    IOReturn dmaMap(
+	IOMapper                    * mapper,
+	const IODMAMapSpecification * mapSpec,
+	uint64_t                      offset,
+	uint64_t                      length,
+	uint64_t                    * address,
+	ppnum_t                     * mapPages);
+    bool initMemoryEntries(size_t size, IOMapper * mapper);
+#endif
+
 private:
 
 #ifndef __LP64__
     virtual void unmapFromKernel();
 #endif /* !__LP64__ */
 
-    // Internal APIs may be made virtual at some time in the future.
-    IOReturn wireVirtual(IODirection forDirection);
     void *createNamedEntry();
 
     // Internal

iokit/IOKit/pwr_mgt/IOPM.h

  *  false       == Retain FV key when going to standby mode
  *  not present == Retain FV key when going to standby mode
  */
-#define kIOPMDestroyFVKeyOnStandbyKey            "DestroyFVKeyOnStandby"
+#define kIOPMDestroyFVKeyOnStandbyKey       "DestroyFVKeyOnStandby"
 
 /*******************************************************************************
  *
      */
     kIOPMDriverAssertionPreventDisplaySleepBit      = 0x40,
 
-    kIOPMDriverAssertionReservedBit7                = 0x80
+    /*! kIOPMDriverAssertionReservedBit7
+     * Reserved for storage family.
+     */
+    kIOPMDriverAssertionReservedBit7                = 0x80,
+
+    /*! kIOPMDriverAssertionMagicPacketWakeEnabledBit
+     * When set, driver is informing PM that magic packet wake is enabled.
+     */
+    kIOPMDriverAssertionMagicPacketWakeEnabledBit   = 0x100
 };
 
  /* kIOPMAssertionsDriverKey

iokit/IOKit/pwr_mgt/IOPMPrivate.h

 /* @constant kIOPMTimelineDictionaryKey
  * @abstract RootDomain key for dictionary describing Timeline's info
  */
-#define     kIOPMTimelineDictionaryKey                  "PMTimelineLogging"
+#define kIOPMTimelineDictionaryKey              "PMTimelineLogging"
 
 /* @constant kIOPMTimelineEnabledKey
  * @abstract Boolean value indicating whether the system is recording PM events.
  * @discussion Key may be found in the dictionary at IOPMrootDomain's property 
  * kIOPMTimelineDictionaryKey. uint32_t value; may be 0.
  */
-#define     kIOPMTimelineEnabledKey                     "TimelineEnabled"
+#define kIOPMTimelineEnabledKey                 "TimelineEnabled"
 
 /* @constant kIOMPTimelineSystemNumberTrackedKey
  * @abstract The maximum number of system power events the system may record.
  * @discussion Key may be found in the dictionary at IOPMrootDomain's property 
  * kIOPMTimelineDictionaryKey. uint32_t value; may be 0.
  */
-#define     kIOPMTimelineSystemNumberTrackedKey         "TimelineSystemEventsTracked"
+#define kIOPMTimelineSystemNumberTrackedKey     "TimelineSystemEventsTracked"
 
 /* @constant kIOPMTimelineSystemBufferSizeKey
  * @abstract Size in bytes  of buffer recording system PM events
  * @discussion Key may be found in the dictionary at IOPMrootDomain's property 
  * kIOPMTimelineDictionaryKey. uint32_t value; may be 0.
  */
-#define     kIOPMTimelineSystemBufferSizeKey            "TimelineSystemBufferSize"
+#define kIOPMTimelineSystemBufferSizeKey        "TimelineSystemBufferSize"
 
 
 
 #define kIOPMSleepWakeFailureUUIDKey        "UUID"
 #define kIOPMSleepWakeFailureDateKey        "Date"
 
-/******************************************************************************/
-/* System sleep policy
- * Shared between PM root domain and platform driver.
+/*****************************************************************************
+ *
+ * Root Domain private property keys
+ *
+ *****************************************************************************/
+
+/* kIOPMAutoPowerOffEnabledKey
+ * Indicates if Auto Power Off is enabled.
+ * It has a boolean value.
+ *  true        == Auto Power Off is enabled
+ *  false       == Auto Power Off is disabled
+ *  not present == Auto Power Off is not supported on this hardware
  */
+#define kIOPMAutoPowerOffEnabledKey         "AutoPowerOff Enabled"
 
-// Platform specific property added by the platform driver.
-// An OSData that describes the system sleep policy.
-#define kIOPlatformSystemSleepPolicyKey     "IOPlatformSystemSleepPolicy"
+/* kIOPMAutoPowerOffDelayKey
+ * Key refers to a CFNumberRef that represents the delay in seconds before
+ * entering the Auto Power Off state.  The property is not present if Auto
+ * Power Off is unsupported.
+ */
+#define kIOPMAutoPowerOffDelayKey           "AutoPowerOff Delay"
 
-// Root domain property updated before platform sleep.
-// An OSData that describes the system sleep parameters.
+/*****************************************************************************
+ *
+ * System Sleep Policy
+ *
+ *****************************************************************************/
+
+#define kIOPMSystemSleepPolicySignature     0x54504c53
+#define kIOPMSystemSleepPolicyVersion       2
+
+/*!
+ * @defined kIOPMSystemSleepTypeKey
+ * @abstract Indicates the type of system sleep.
+ * @discussion An OSNumber property of root domain that describes the type
+ * of system sleep. This property is set after notifying priority sleep/wake
+ * clients, but before informing interested drivers and shutting down power
+ * plane drivers.
+ */
+#define kIOPMSystemSleepTypeKey             "IOPMSystemSleepType"
+
+struct IOPMSystemSleepPolicyVariables
+{
+    uint32_t    signature;                  // kIOPMSystemSleepPolicySignature
+    uint32_t    version;                    // kIOPMSystemSleepPolicyVersion
+
+    uint64_t    currentCapability;          // current system capability bits
+    uint64_t    highestCapability;          // highest system capability bits
+
+    uint64_t    sleepFactors;               // sleep factor bits
+    uint32_t    sleepReason;                // kIOPMSleepReason*
+    uint32_t    sleepPhase;                 // identify the sleep phase
+    uint32_t    hibernateMode;              // current hibernate mode
+
+    uint32_t    standbyDelay;               // standby delay in seconds
+    uint32_t    poweroffDelay;              // auto-poweroff delay in seconds
+    uint32_t    scheduledAlarms;            // bitmask of scheduled alarm types
+
+    uint32_t    reserved[50];               // pad sizeof 256 bytes
+};
+
+enum {
+    kIOPMAlarmBitDebugWake                  = 0x01,
+    kIOPMAlarmBitCalendarWake               = 0x02,
+    kIOPMAlarmBitMaintenanceWake            = 0x04,
+    kIOPMAlarmBitSleepServiceWake           = 0x08
+};
+
+enum {
+    kIOPMSleepPhase1 = 1,
+    kIOPMSleepPhase2
+};
+
+// Sleep Factor Mask / Bits
+enum {
+    kIOPMSleepFactorSleepTimerWake          = 0x00000001ULL,
+    kIOPMSleepFactorLidOpen                 = 0x00000002ULL,
+    kIOPMSleepFactorACPower                 = 0x00000004ULL,
+    kIOPMSleepFactorBatteryLow              = 0x00000008ULL,
+    kIOPMSleepFactorStandbyNoDelay          = 0x00000010ULL,
+    kIOPMSleepFactorStandbyForced           = 0x00000020ULL,
+    kIOPMSleepFactorStandbyDisabled         = 0x00000040ULL,
+    kIOPMSleepFactorUSBExternalDevice       = 0x00000080ULL,
+    kIOPMSleepFactorBluetoothHIDDevice      = 0x00000100ULL,
+    kIOPMSleepFactorExternalMediaMounted    = 0x00000200ULL,
+    kIOPMSleepFactorThunderboltDevice       = 0x00000400ULL,
+    kIOPMSleepFactorRTCAlarmScheduled       = 0x00000800ULL,
+    kIOPMSleepFactorMagicPacketWakeEnabled  = 0x00001000ULL,
+    kIOPMSleepFactorHibernateForced         = 0x00010000ULL,
+    kIOPMSleepFactorAutoPowerOffDisabled    = 0x00020000ULL,
+    kIOPMSleepFactorAutoPowerOffForced      = 0x00040000ULL,
+    kIOPMSleepFactorExternalDisplay         = 0x00080000ULL
+};
+
+// System Sleep Types
+enum {
+    kIOPMSleepTypeInvalid                   = 0,
+    kIOPMSleepTypeAbortedSleep              = 1,
+    kIOPMSleepTypeNormalSleep               = 2,
+    kIOPMSleepTypeSafeSleep                 = 3,
+    kIOPMSleepTypeHibernate                 = 4,
+    kIOPMSleepTypeStandby                   = 5,
+    kIOPMSleepTypePowerOff                  = 6,
+    kIOPMSleepTypeLast                      = 7
+};
+
+// System Sleep Flags
+enum {
+    kIOPMSleepFlagDisableHibernateAbort     = 0x00000001,
+    kIOPMSleepFlagDisableUSBWakeEvents      = 0x00000002,
+    kIOPMSleepFlagDisableBatlowAssertion    = 0x00000004
+};
+
+// System Wake Events
+enum {
+    kIOPMWakeEventLidOpen                   = 0x00000001,
+    kIOPMWakeEventLidClose                  = 0x00000002,
+    kIOPMWakeEventACAttach                  = 0x00000004,
+    kIOPMWakeEventACDetach                  = 0x00000008,
+    kIOPMWakeEventCDInsert                  = 0x00000010,
+    kIOPMWakeEventCDEject                   = 0x00000020,
+    kIOPMWakeEventHPDAttach                 = 0x00000040,
+    kIOPMWakeEventHPDDetach                 = 0x00000080,
+    kIOPMWakeEventPowerButton               = 0x00000100,
+    kIOPMWakeEventG3PowerOn                 = 0x00000200,
+    kIOPMWakeEventUserPME                   = 0x00000400,
+    kIOPMWakeEventSleepTimer                = 0x00000800,
+    kIOPMWakeEventBatteryLow                = 0x00001000,
+    kIOPMWakeEventDarkPME                   = 0x00002000
+};
+
+/*!
+ * @defined kIOPMSystemSleepParametersKey
+ * @abstract Sleep parameters describing the upcoming sleep
+ * @discussion Root domain updates this OSData property before system sleep
+ * to pass sleep parameters to the platform driver.  Some of the parameters
+ * are based on the chosen entry in the system sleep policy table.
+ */
 #define kIOPMSystemSleepParametersKey       "IOPMSystemSleepParameters"
+#define kIOPMSystemSleepParametersVersion   2
 
 struct IOPMSystemSleepParameters
 {
-    uint32_t    version;
+    uint16_t    version;
+    uint16_t    reserved1;
+    uint32_t    sleepType;
     uint32_t    sleepFlags;
-    uint32_t    sleepTimer;
-    uint32_t    wakeEvents;
-};
+    uint32_t    ecWakeEvents;
+    uint32_t    ecWakeTimer;
+    uint32_t    ecPoweroffTimer;
+    uint32_t    reserved2[10];
+} __attribute__((packed));
 
-// Sleep flags
-enum {
-    kIOPMSleepFlagHibernate         = 0x00000001,
-    kIOPMSleepFlagSleepTimerEnable  = 0x00000002
-};
+#if defined(KERNEL) && defined(__cplusplus)
+
+/*!
+ * @defined kIOPMInstallSystemSleepPolicyHandlerKey
+ * @abstract Name of the platform function to install a sleep policy handler.
+ * @discussion Pass to IOPMrootDomain::callPlatformFunction(), with a pointer
+ * to the C-function handler at param1, and an optional target at param2, to
+ * register a sleep policy handler. Only a single sleep policy handler can
+ * be installed.
+ */
+#define kIOPMInstallSystemSleepPolicyHandlerKey	\
+        "IOPMInstallSystemSleepPolicyHandler"
+
+typedef IOReturn (*IOPMSystemSleepPolicyHandler)(
+        void * target,
+        const IOPMSystemSleepPolicyVariables * vars,
+        IOPMSystemSleepParameters * params );
+
+#endif /* KERNEL */
 
 #endif /* ! _IOKIT_IOPMPRIVATE_H */
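
Putting the pieces together, a platform driver would register a handler of this type through kIOPMInstallSystemSleepPolicyHandlerKey. A hypothetical handler matching the typedef above (the policy it implements is made up for illustration):

    static IOReturn
    my_sleep_policy(void * target,
                    const IOPMSystemSleepPolicyVariables * vars,
                    IOPMSystemSleepParameters * params)
    {
            if (vars->signature != kIOPMSystemSleepPolicySignature ||
                vars->version != kIOPMSystemSleepPolicyVersion)
                    return kIOReturnBadArgument;

            params->version = kIOPMSystemSleepParametersVersion;
            if (vars->sleepFactors & kIOPMSleepFactorStandbyDisabled)
                    params->sleepType = kIOPMSleepTypeNormalSleep;
            else
                    params->sleepType = kIOPMSleepTypeStandby;
            return kIOReturnSuccess;
    }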

iokit/IOKit/pwr_mgt/RootDomain.h

     IONotifier *            systemCapabilityNotifier;
 
     IOPMTimeline            *timeline;
-    
+
     typedef struct {
         uint32_t            pid;
         uint32_t            refcount;
     OSSet *                 preventIdleSleepList;
     OSSet *                 preventSystemSleepList;
 
+    UInt32                  _scheduledAlarms;
+
 #if HIBERNATION
     clock_sec_t             _standbyTimerResetSeconds;
 #endif
 
 #if HIBERNATION
     bool        getSleepOption( const char * key, uint32_t * option );
-    bool        evaluateSystemSleepPolicy( IOPMSystemSleepParameters * p, int sleepPhase );
+    bool        evaluateSystemSleepPolicy( IOPMSystemSleepParameters * p, int phase );
     void        evaluateSystemSleepPolicyEarly( void );
     void        evaluateSystemSleepPolicyFinal( void );
 #endif /* HIBERNATION */

iokit/Kernel/IOBufferMemoryDescriptor.cpp

 #include <IOKit/IOMapper.h>
 #include <IOKit/IOBufferMemoryDescriptor.h>
 #include <libkern/OSDebug.h>
+#include <mach/mach_vm.h>
 
 #include "IOKitKernelInternal.h"
 
+#ifdef IOALLOCDEBUG
+#include <libkern/c++/OSCPPDebug.h>
+#endif
+#include <IOKit/IOStatisticsPrivate.h>
+
+#if IOKITSTATS
+#define IOStatisticsAlloc(type, size) \
+do { \
+	IOStatistics::countAlloc(type, size); \
+} while (0)
+#else
+#define IOStatisticsAlloc(type, size)
+#endif /* IOKITSTATS */
+
+
 __BEGIN_DECLS
 void ipc_port_release_send(ipc_port_t port);
 #include <vm/pmap.h>
 
 enum
 {
-    kInternalFlagPhysical  = 0x00000001,
-    kInternalFlagPageSized = 0x00000002
+    kInternalFlagPhysical      = 0x00000001,
+    kInternalFlagPageSized     = 0x00000002,
+    kInternalFlagPageAllocated = 0x00000004
 };
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+#if 0
+#undef assert
+#define assert(ex)  \
+	((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex))
+#endif
+
+enum
+{
+    kIOPageAllocChunkBytes = (PAGE_SIZE / 64),
+    kIOPageAllocSignature  = 'iopa'
+};
+
+struct io_pagealloc_t
+{
+    queue_chain_t link;
+    uint64_t      avail;
+    uint32_t      signature;
+};
+typedef struct io_pagealloc_t io_pagealloc_t;
+
+typedef char io_pagealloc_t_assert[(sizeof(io_pagealloc_t) <= kIOPageAllocChunkBytes) ? 1 : -1];
+
+IOSimpleLock * gIOPageAllocLock;
+queue_head_t   gIOPageAllocList;
+vm_size_t      gIOPageAllocCount;
+vm_size_t      gIOPageAllocBytes;
+
+static io_pagealloc_t * 
+iopa_allocpage(void)
+{
+    kern_return_t    kr;
+    io_pagealloc_t * pa;
+    vm_address_t     vmaddr = 0;
+
+    int options = 0; // KMA_LOMEM;
+    kr = kernel_memory_allocate(kernel_map, &vmaddr,
+				page_size, 0, options);
+    if (KERN_SUCCESS != kr) return (0);
+
+    bzero((void *) vmaddr, page_size);
+    pa = (typeof(pa)) (vmaddr + page_size - kIOPageAllocChunkBytes);
+
+    pa->signature = kIOPageAllocSignature;
+    pa->avail     = -2ULL;
+
+    return (pa);
+}
+
+static void 
+iopa_freepage(io_pagealloc_t * pa)
+{
+    kmem_free( kernel_map, trunc_page((uintptr_t) pa), page_size);
+}
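
The iopa_allocinpage() routine below finds a run of count free chunks with a shift-and-AND trick: after each pass, a bit stays set in avail only if it tops a long-enough run of set bits. A small standalone demonstration (the helper name is made up):

    #include <stdio.h>
    #include <stdint.h>

    /* Same trick as iopa_allocinpage(): return the top bit position of a
     * run of count set bits in avail, or -1 if no such run exists. */
    static int find_run(uint64_t avail, uint32_t count)
    {
            uint32_t n, s;

            for (n = count; n > 1; n -= s) {
                    s = n >> 1;
                    avail &= avail << s;  /* keep bits whose lower neighbors are also set */
            }
            if (!avail)
                    return -1;
            return 63 - __builtin_clzll(avail);
    }

    int main(void)
    {
            /* 0xF0 = bits 4..7 set; runs of three end at bits 6 and 7 */
            printf("%d\n", find_run(0xF0, 3));  /* prints 7 */
            return 0;
    }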
+
+static uintptr_t
+iopa_allocinpage(io_pagealloc_t * pa, uint32_t count, uint64_t align)
+{
+    uint32_t n, s;
+    uint64_t avail = pa->avail;
+
+    assert(avail);
+
+    // find strings of count 1 bits in avail
+    for (n = count; n > 1; n -= s)
+    {
+    	s = n >> 1;
+    	avail = avail & (avail << s);
+    }
+    // and aligned
+    avail &= align;
+
+    if (avail)
+    {
+	n = __builtin_clzll(avail);
+	pa->avail &= ~((-1ULL << (64 - count)) >> n);
+	if (!pa->avail && pa->link.next)