# mpatch.py - Python implementation of mpatch.c
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import cStringIO
import struct

StringIO = cStringIO.StringIO

# This attempts to apply a series of patches in time proportional to
# the total size of the patches, rather than patches * len(text). This
# means rather than shuffling strings around, we shuffle around
# pointers to fragments with fragment lists.
#
# When the fragment lists get too long, we collapse them. To do this
# efficiently, we do all our operations inside a buffer created by
# mmap and simply use memmove. This avoids creating a bunch of large
# temporary string buffers.

def patches(a, bins):
    if not bins:
        return a

    plens = [len(x) for x in bins]
    pl = sum(plens)
    bl = len(a) + pl
    tl = bl + bl + pl  # enough for the patches and two working texts
    b1, b2 = 0, bl

    if not tl:
        return a

    m = StringIO()

    def move(dest, src, count):
        """move count bytes from src to dest

        The file pointer is left at the end of dest.
        """
        m.seek(src)
        buf = m.read(count)
        m.seek(dest)
        m.write(buf)

    # load our original text
    m.write(a)
    frags = [(len(a), b1)]

    # copy all the patches into our segment so we can memmove from them
    pos = b2 + bl
    m.seek(pos)
    for p in bins:
        m.write(p)

    def pull(dst, src, l):  # pull l bytes from src
        while l:
            f = src.pop()
            if f[0] > l:  # do we need to split?
                src.append((f[0] - l, f[1] + l))
                dst.append((l, f[1]))
                return
            dst.append(f)
            l -= f[0]

    def collect(buf, list):
        start = buf
        for l, p in reversed(list):
            move(buf, p, l)
            buf += l
        return (buf - start, start)

    for plen in plens:
        # if our list gets too long, execute it
        if len(frags) > 128:
            b2, b1 = b1, b2
            frags = [collect(b1, frags)]

        new = []
        end = pos + plen
        last = 0
        while pos < end:
            m.seek(pos)
            p1, p2, l = struct.unpack(">lll", m.read(12))
            pull(new, frags, p1 - last)  # what didn't change
            pull([], frags, p2 - p1)     # what got deleted
            new.append((l, pos + 12))    # what got added
            pos += l + 12
            last = p2
        frags.extend(reversed(new))      # what was left at the end

    t = collect(b2, frags)

    m.seek(t[1])
    return m.read(t[0])

def patchedsize(orig, delta):
    outlen, last, bin = 0, 0, 0
    binend = len(delta)
    data = 12

    while data <= binend:
        decode = delta[bin:bin + 12]
        start, end, length = struct.unpack(">lll", decode)
        if start > end:
            break
        bin = data + length
        data = bin + 12
        outlen += start - last
        last = end
        outlen += length

    if bin != binend:
        raise ValueError("patch cannot be decoded")

    outlen += orig - last
    return outlen
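
# A minimal usage sketch, not part of the original module. It assumes only
# the hunk layout read above: each hunk is a 12-byte big-endian header of
# (start, end, new-length) followed by the replacement bytes. The example
# data ("the quick brown fox", the single hunk) is made up for illustration;
# it shows patches() applying a one-hunk delta and patchedsize() agreeing
# on the resulting length. Runs under Python 2, which this module targets.
if __name__ == '__main__':
    orig = "the quick brown fox"
    # replace bytes 4..9 ("quick") with "sly"
    hunk = struct.pack(">lll", 4, 9, 3) + "sly"
    patched = patches(orig, [hunk])
    assert patched == "the sly brown fox"
    assert patchedsize(len(orig), hunk) == len(patched)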