Merge branch 'ns/batched-fsync'

This merges the topic branch (specifically backported onto v2.33.1 to
allow for integrating into Git for Windows' `main` branch) that strikes
a better balance between safety and speed: rather than `fsync()`ing each
and every loose object file, we now offer to do it in a batch.

This will become the new default in Git for Windows.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
This commit is contained in:
Johannes Schindelin
2021-10-26 17:40:46 +02:00
committed by Victoria Dye
24 changed files with 422 additions and 32 deletions

View File

@@ -576,12 +576,29 @@ core.whitespace::
errors. The default tab width is 8. Allowed values are 1 to 63.
core.fsyncObjectFiles::
This boolean will enable 'fsync()' when writing object files.
A value indicating the level of effort Git will expend in
trying to make objects added to the repo durable in the event
of an unclean system shutdown. This setting currently only
controls loose objects in the object store, so updates to any
refs or the index may not be equally durable.
+
This is a total waste of time and effort on a filesystem that orders
data writes properly, but can be useful for filesystems that do not use
journalling (traditional UNIX filesystems) or that only journal metadata
and not file contents (OS X's HFS+, or Linux ext3 with "data=writeback").
* `false` allows data to remain in file system caches according to
operating system policy, whence it may be lost if the system loses power
or crashes.
* `true` triggers a data integrity flush for each loose object added to the
object store. This is the safest setting that is likely to ensure durability
across all operating systems and file systems that honor the 'fsync' system
call. However, this setting comes with a significant performance cost on
common hardware. Git does not currently fsync parent directories for
newly-added files, so some filesystems may still allow data to be lost on
system crash.
* `batch` enables an experimental mode that uses interfaces available in some
operating systems to write loose object data with a minimal set of FLUSH
CACHE (or equivalent) commands sent to the storage controller. If the
operating system interfaces are not available, this mode behaves the same as
`true`. This mode is expected to be as safe as `true` on macOS for repos
stored on HFS+ or APFS filesystems and on Windows for repos stored on NTFS or
ReFS.
core.preloadIndex::
Enable parallel index preload for operations like 'git diff'

View File

@@ -405,6 +405,8 @@ all::
#
# Define HAVE_CLOCK_MONOTONIC if your platform has CLOCK_MONOTONIC.
#
# Define HAVE_SYNC_FILE_RANGE if your platform has sync_file_range.
#
# Define NEEDS_LIBRT if your platform requires linking with librt (glibc version
# before 2.17) for clock_gettime and CLOCK_MONOTONIC.
#
@@ -1909,6 +1911,10 @@ ifdef HAVE_CLOCK_MONOTONIC
BASIC_CFLAGS += -DHAVE_CLOCK_MONOTONIC
endif
ifdef HAVE_SYNC_FILE_RANGE
BASIC_CFLAGS += -DHAVE_SYNC_FILE_RANGE
endif
ifdef NEEDS_LIBRT
EXTLIBS += -lrt
endif

View File

@@ -1,5 +1,6 @@
#include "builtin.h"
#include "cache.h"
#include "bulk-checkin.h"
#include "config.h"
#include "object-store.h"
#include "object.h"
@@ -503,10 +504,12 @@ static void unpack_all(void)
if (!quiet)
progress = start_progress(_("Unpacking objects"), nr_objects);
CALLOC_ARRAY(obj_list, nr_objects);
plug_bulk_checkin();
for (i = 0; i < nr_objects; i++) {
unpack_one(i);
display_progress(progress, i + 1);
}
unplug_bulk_checkin();
stop_progress(&progress);
if (delta_list)

View File

@@ -5,6 +5,7 @@
*/
#define USE_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "bulk-checkin.h"
#include "config.h"
#include "lockfile.h"
#include "quote.h"
@@ -1088,6 +1089,9 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
the_index.updated_skipworktree = 1;
/* we might be adding many objects to the object database */
plug_bulk_checkin();
/*
* Custom copy of parse_options() because we want to handle
* filename arguments as they come.
@@ -1168,6 +1172,8 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
strbuf_release(&buf);
}
/* by now we must have added all of the new objects */
unplug_bulk_checkin();
if (split_index > 0) {
if (git_config_get_split_index() == 0)
warning(_("core.splitIndex is set to false; "

View File

@@ -3,16 +3,22 @@
*/
#include "cache.h"
#include "bulk-checkin.h"
#include "lockfile.h"
#include "repository.h"
#include "csum-file.h"
#include "pack.h"
#include "strbuf.h"
#include "string-list.h"
#include "tmp-objdir.h"
#include "packfile.h"
#include "object-store.h"
static struct bulk_checkin_state {
unsigned plugged:1;
static int bulk_checkin_plugged;
static int needs_batch_fsync;
static struct tmp_objdir *bulk_fsync_objdir;
static struct bulk_checkin_state {
char *pack_tmp_name;
struct hashfile *f;
off_t offset;
@@ -21,7 +27,7 @@ static struct bulk_checkin_state {
struct pack_idx_entry **written;
uint32_t alloc_written;
uint32_t nr_written;
} state;
} bulk_checkin_state;
static void finish_tmp_packfile(struct strbuf *basename,
const char *pack_tmp_name,
@@ -79,6 +85,34 @@ clear_exit:
reprepare_packed_git(the_repository);
}
/*
* Cleanup after batch-mode fsync_object_files.
*/
static void do_batch_fsync(void)
{
/*
* Issue a full hardware flush against a temporary file to ensure
* that all objects are durable before any renames occur. The code in
* fsync_loose_object_bulk_checkin has already issued a writeout
* request, but it has not flushed any writeback cache in the storage
* hardware.
*/
if (needs_batch_fsync) {
struct strbuf temp_path = STRBUF_INIT;
struct tempfile *temp;
strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX", get_object_directory());
temp = xmks_tempfile(temp_path.buf);
fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp));
delete_tempfile(&temp);
strbuf_release(&temp_path);
}
if (bulk_fsync_objdir)
tmp_objdir_migrate(bulk_fsync_objdir);
}
static int already_written(struct bulk_checkin_state *state, struct object_id *oid)
{
int i;
@@ -273,25 +307,61 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
return 0;
}
/*
 * Request durability for a freshly-written loose object file.
 *
 * Only meaningful in core.fsyncObjectFiles=batch mode: while a bulk
 * checkin is plugged, we only push the data out of the filesystem page
 * cache here and defer the (expensive) hardware flush to a single call
 * in do_batch_fsync(). Outside of a plugged section, or if the
 * writeout-only primitive is unavailable, fall back to a full fsync of
 * this file.
 */
void fsync_loose_object_bulk_checkin(int fd)
{
	assert(fsync_object_files == FSYNC_OBJECT_FILES_BATCH);

	/*
	 * If we have a plugged bulk checkin, we issue a call that
	 * cleans the filesystem page cache but avoids a hardware flush
	 * command. Later on we will issue a single hardware flush
	 * as part of do_batch_fsync().
	 */
	if (bulk_checkin_plugged &&
	    git_fsync(fd, FSYNC_WRITEOUT_ONLY) >= 0)
		needs_batch_fsync = 1;
	else
		fsync_or_die(fd, "loose object file");
}
/*
 * Deflate the object at fd into the bulk-checkin pack. If no bulk
 * checkin is plugged, the temporary packfile is finalized immediately.
 * Returns the status of deflate_to_pack() (0 on success).
 */
int index_bulk_checkin(struct object_id *oid,
		       int fd, size_t size, enum object_type type,
		       const char *path, unsigned flags)
{
	/*
	 * Note: the diff rendering had left the stale pre-image
	 * "&state"/"state.plugged" lines interleaved here; only the
	 * post-image (bulk_checkin_state/bulk_checkin_plugged) survives.
	 */
	int status = deflate_to_pack(&bulk_checkin_state, oid, fd, size, type,
				     path, flags);
	if (!bulk_checkin_plugged)
		finish_bulk_checkin(&bulk_checkin_state);
	return status;
}
/*
 * Begin a plugged bulk-checkin section. In batch fsync mode, newly
 * written objects are staged in a temporary object directory until
 * unplug_bulk_checkin() flushes and migrates them.
 */
void plug_bulk_checkin(void)
{
	assert(!bulk_checkin_plugged);

	/*
	 * A temporary object directory is used to hold the files
	 * while they are not fsynced.
	 */
	if (fsync_object_files == FSYNC_OBJECT_FILES_BATCH) {
		bulk_fsync_objdir = tmp_objdir_create("bulk-fsync");
		if (!bulk_fsync_objdir)
			die(_("Could not create temporary object directory for core.fsyncobjectfiles=batch"));

		tmp_objdir_replace_primary_odb(bulk_fsync_objdir, 0);
	}

	bulk_checkin_plugged = 1;
}
/*
 * End a plugged bulk-checkin section: finalize any in-progress
 * temporary packfile, then (in batch mode) issue the single hardware
 * flush and migrate the staged objects into the real object directory.
 */
void unplug_bulk_checkin(void)
{
	assert(bulk_checkin_plugged);
	bulk_checkin_plugged = 0;
	if (bulk_checkin_state.f)
		finish_bulk_checkin(&bulk_checkin_state);

	do_batch_fsync();
}

View File

@@ -6,6 +6,8 @@
#include "cache.h"
void fsync_loose_object_bulk_checkin(int fd);
int index_bulk_checkin(struct object_id *oid,
int fd, size_t size, enum object_type type,
const char *path, unsigned flags);

View File

@@ -995,7 +995,13 @@ void reset_shared_repository(void);
extern int read_replace_refs;
extern char *git_replace_ref_base;
extern int fsync_object_files;
enum fsync_object_files_mode {
FSYNC_OBJECT_FILES_OFF,
FSYNC_OBJECT_FILES_ON,
FSYNC_OBJECT_FILES_BATCH
};
extern enum fsync_object_files_mode fsync_object_files;
extern int use_fsync;
extern int core_preload_index;
extern int precomposed_unicode;

View File

@@ -332,6 +332,9 @@ int mingw_getpagesize(void);
#define getpagesize mingw_getpagesize
#endif
int win32_fsync_no_flush(int fd);
#define fsync_no_flush win32_fsync_no_flush
struct rlimit {
unsigned int rlim_cur;
};

28
compat/win32/flush.c Normal file
View File

@@ -0,0 +1,28 @@
#include "../../git-compat-util.h"
#include <winternl.h>
#include "lazyload.h"
/*
 * Push this file's data out of the OS cache to the storage device
 * without sending a hardware FLUSH CACHE command, by asking
 * NtFlushBuffersFileEx to flush file data only.
 *
 * Returns 0 on success; -1 with errno set to ENOSYS when the API is
 * unavailable (NOTE(review): NtFlushBuffersFileEx is presumably absent
 * on older Windows versions — hence the lazy lookup; confirm minimum
 * supported version), or EINVAL when the flush itself fails.
 */
int win32_fsync_no_flush(int fd)
{
	IO_STATUS_BLOCK io_status;

/* Flag value for NtFlushBuffersFileEx: flush data, skip metadata/hardware flush */
#define FLUSH_FLAGS_FILE_DATA_ONLY 1

	/* Resolve NtFlushBuffersFileEx from ntdll.dll at first use. */
	DECLARE_PROC_ADDR(ntdll.dll, NTSTATUS, NtFlushBuffersFileEx,
			  HANDLE FileHandle, ULONG Flags, PVOID Parameters, ULONG ParameterSize,
			  PIO_STATUS_BLOCK IoStatusBlock);

	if (!INIT_PROC_ADDR(NtFlushBuffersFileEx)) {
		errno = ENOSYS;
		return -1;
	}

	memset(&io_status, 0, sizeof(io_status));
	/* A non-zero NTSTATUS indicates failure. */
	if (NtFlushBuffersFileEx((HANDLE)_get_osfhandle(fd), FLUSH_FLAGS_FILE_DATA_ONLY,
				 NULL, 0, &io_status)) {
		errno = EINVAL;
		return -1;
	}

	return 0;
}

View File

@@ -1491,7 +1491,12 @@ int git_default_core_config(const char *var, const char *value, void *cb)
}
if (!strcmp(var, "core.fsyncobjectfiles")) {
fsync_object_files = git_config_bool(var, value);
if (value && !strcmp(value, "batch"))
fsync_object_files = FSYNC_OBJECT_FILES_BATCH;
else if (git_config_bool(var, value))
fsync_object_files = FSYNC_OBJECT_FILES_ON;
else
fsync_object_files = FSYNC_OBJECT_FILES_OFF;
return 0;
}

View File

@@ -57,6 +57,7 @@ ifeq ($(uname_S),Linux)
HAVE_CLOCK_MONOTONIC = YesPlease
# -lrt is needed for clock_gettime on glibc <= 2.16
NEEDS_LIBRT = YesPlease
HAVE_SYNC_FILE_RANGE = YesPlease
HAVE_GETDELIM = YesPlease
FREAD_READS_DIRECTORIES = UnfortunatelyYes
BASIC_CFLAGS += -DHAVE_SYSINFO
@@ -475,6 +476,7 @@ endif
CFLAGS =
BASIC_CFLAGS = -nologo -I. -Icompat/vcbuild/include -DWIN32 -D_CONSOLE -DHAVE_STRING_H -D_CRT_SECURE_NO_WARNINGS -D_CRT_NONSTDC_NO_DEPRECATE
COMPAT_OBJS = compat/msvc.o compat/winansi.o \
compat/win32/flush.o \
compat/win32/path-utils.o \
compat/win32/pthread.o compat/win32/syslog.o \
compat/win32/trace2_win32_process_info.o \
@@ -662,6 +664,7 @@ ifeq ($(uname_S),MINGW)
COMPAT_CFLAGS += -DSTRIP_EXTENSION=\".exe\"
COMPAT_OBJS += compat/mingw.o compat/winansi.o \
compat/win32/trace2_win32_process_info.o \
compat/win32/flush.o \
compat/win32/path-utils.o \
compat/win32/pthread.o compat/win32/syslog.o \
compat/win32/dirent.o compat/win32/fscache.o

View File

@@ -1095,6 +1095,14 @@ AC_COMPILE_IFELSE([CLOCK_MONOTONIC_SRC],
[AC_MSG_RESULT([no])
HAVE_CLOCK_MONOTONIC=])
GIT_CONF_SUBST([HAVE_CLOCK_MONOTONIC])
#
# Define HAVE_SYNC_FILE_RANGE=YesPlease if sync_file_range is available.
GIT_CHECK_FUNC(sync_file_range,
[HAVE_SYNC_FILE_RANGE=YesPlease],
[HAVE_SYNC_FILE_RANGE])
GIT_CONF_SUBST([HAVE_SYNC_FILE_RANGE])
#
# Define NO_SETITIMER if you don't have setitimer.
GIT_CHECK_FUNC(setitimer,

View File

@@ -286,7 +286,8 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
NOGDI OBJECT_CREATION_MODE=1 __USE_MINGW_ANSI_STDIO=0
USE_NED_ALLOCATOR OVERRIDE_STRDUP MMAP_PREVENTS_DELETE USE_WIN32_MMAP
UNICODE _UNICODE HAVE_WPGMPTR ENSURE_MSYSTEM_IS_SET)
list(APPEND compat_SOURCES compat/mingw.c compat/winansi.c compat/win32/path-utils.c
list(APPEND compat_SOURCES compat/mingw.c compat/winansi.c
compat/win32/flush.c compat/win32/path-utils.c
compat/win32/pthread.c compat/win32mmap.c compat/win32/syslog.c
compat/win32/trace2_win32_process_info.c compat/win32/dirent.c
compat/nedmalloc/nedmalloc.c compat/strdup.c compat/win32/fscache.c)

View File

@@ -42,7 +42,7 @@ const char *git_attributes_file;
const char *git_hooks_path;
int zlib_compression_level = Z_BEST_SPEED;
int pack_compression_level = Z_DEFAULT_COMPRESSION;
int fsync_object_files;
enum fsync_object_files_mode fsync_object_files;
int use_fsync = -1;
size_t packed_git_window_size = DEFAULT_PACKED_GIT_WINDOW_SIZE;
size_t packed_git_limit = DEFAULT_PACKED_GIT_LIMIT;

View File

@@ -1284,6 +1284,13 @@ __attribute__((format (printf, 1, 2))) NORETURN
void BUG(const char *fmt, ...);
#endif
enum fsync_action {
FSYNC_WRITEOUT_ONLY,
FSYNC_HARDWARE_FLUSH
};
int git_fsync(int fd, enum fsync_action action);
/*
* Preserves errno, prints a message, but gives no warning for ENOENT.
* Returns 0 on success, which includes trying to unlink an object that does

View File

@@ -1850,8 +1850,18 @@ int hash_object_file(const struct git_hash_algo *algo, const void *buf,
static void close_loose_object(int fd)
{
if (!the_repository->objects->odb->will_destroy) {
if (fsync_object_files)
switch (fsync_object_files) {
case FSYNC_OBJECT_FILES_OFF:
break;
case FSYNC_OBJECT_FILES_ON:
fsync_or_die(fd, "loose object file");
break;
case FSYNC_OBJECT_FILES_BATCH:
fsync_loose_object_bulk_checkin(fd);
break;
default:
BUG("Invalid fsync_object_files mode.");
}
}
if (close(fd) != 0)

36
t/lib-unique-files.sh Normal file
View File

@@ -0,0 +1,36 @@
# Helper to create files with unique contents
# Create multiple files with unique contents. Takes the number of
# directories, the number of files in each directory, and the base
# directory.
#
# test_create_unique_files 2 3 my_dir -- Creates 2 directories with 3 files
# each in my_dir, all with unique
# contents.
# Create multiple files with unique contents. Takes the number of
# directories, the number of files in each directory, and the base
# directory.
#
# test_create_unique_files 2 3 my_dir -- Creates 2 directories with 3 files
#                                        each in my_dir, all with unique
#                                        contents.
#
# Uniqueness comes from test_tick plus a running counter, so repeated
# invocations produce fresh object contents.
test_create_unique_files() {
	test "$#" -ne 3 && BUG "3 param"

	local dirs="$1"
	local files="$2"
	local basedir="$3"
	local counter=0

	test_tick
	local basedata="$test_tick"

	# Quote expansions so unusual base directory names are handled safely.
	rm -rf "$basedir"

	for i in $(test_seq "$dirs")
	do
		local dir="$basedir/dir$i"
		mkdir -p "$dir"
		for j in $(test_seq "$files")
		do
			counter=$((counter + 1))
			echo "$basedata.$counter" >"$dir/file$j.txt"
		done
	done
}

43
t/perf/p3700-add.sh Executable file
View File

@@ -0,0 +1,43 @@
#!/bin/sh
#
# This test measures the performance of adding new files to the object database
# and index. The test was originally added to measure the effect of the
# core.fsyncObjectFiles=batch mode, which is why we are testing different values
# of that setting explicitly and creating a lot of unique objects.
test_description="Tests performance of add"
. ./perf-lib.sh
. $TEST_DIRECTORY/lib-unique-files.sh
test_perf_default_repo
test_checkout_worktree
dir_count=10
files_per_dir=50
total_files=$((dir_count * files_per_dir))
# We need to create the files each time we run the perf test, but
# we do not want to measure the cost of creating the files, so run
# the test once.
if test "${GIT_PERF_REPEAT_COUNT-1}" -ne 1
then
echo "warning: Setting GIT_PERF_REPEAT_COUNT=1" >&2
GIT_PERF_REPEAT_COUNT=1
fi
for m in false true batch
do
test_expect_success "create the files for core.fsyncObjectFiles=$m" '
git reset --hard &&
# create files across directories
test_create_unique_files $dir_count $files_per_dir files
'
test_perf "add $total_files files (core.fsyncObjectFiles=$m)" "
git -c core.fsyncobjectfiles=$m add files
"
done
test_done

46
t/perf/p3900-stash.sh Executable file
View File

@@ -0,0 +1,46 @@
#!/bin/sh
#
# This test measures the performance of stashing new files to the object
# database and index. The test was originally added to measure the effect of the
# core.fsyncObjectFiles=batch mode, which is why we are testing different values
# of that setting explicitly and creating a lot of unique objects.
test_description="Tests performance of stash"
. ./perf-lib.sh
. $TEST_DIRECTORY/lib-unique-files.sh
test_perf_default_repo
test_checkout_worktree
dir_count=10
files_per_dir=50
total_files=$((dir_count * files_per_dir))
# We need to create the files each time we run the perf test, but
# we do not want to measure the cost of creating the files, so run
# the test once.
if test "${GIT_PERF_REPEAT_COUNT-1}" -ne 1
then
echo "warning: Setting GIT_PERF_REPEAT_COUNT=1" >&2
GIT_PERF_REPEAT_COUNT=1
fi
for m in false true batch
do
test_expect_success "create the files for core.fsyncObjectFiles=$m" '
git reset --hard &&
# create files across directories
test_create_unique_files $dir_count $files_per_dir files
'
# We only stash files in the 'files' subdirectory since
# the perf test infrastructure creates files in the
# current working directory that need to be preserved
test_perf "stash 500 files (core.fsyncObjectFiles=$m)" "
git -c core.fsyncobjectfiles=$m stash push -u -- files
"
done
test_done

View File

@@ -8,6 +8,8 @@ test_description='Test of git add, including the -- option.'
TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
. $TEST_DIRECTORY/lib-unique-files.sh
# Test the file mode "$1" of the file "$2" in the index.
test_mode_in_index () {
case "$(git ls-files -s "$2")" in
@@ -34,6 +36,24 @@ test_expect_success \
'Test that "git add -- -q" works' \
'touch -- -q && git add -- -q'
test_expect_success 'git add: core.fsyncobjectfiles=batch' "
test_create_unique_files 2 4 fsync-files &&
git -c core.fsyncobjectfiles=batch add -- ./fsync-files/ &&
rm -f fsynced_files &&
git ls-files --stage fsync-files/ > fsynced_files &&
test_line_count = 8 fsynced_files &&
awk -- '{print \$2}' fsynced_files | xargs -n1 git cat-file -e
"
test_expect_success 'git update-index: core.fsyncobjectfiles=batch' "
test_create_unique_files 2 4 fsync-files2 &&
find fsync-files2 ! -type d -print | xargs git -c core.fsyncobjectfiles=batch update-index --add -- &&
rm -f fsynced_files2 &&
git ls-files --stage fsync-files2/ > fsynced_files2 &&
test_line_count = 8 fsynced_files2 &&
awk -- '{print \$2}' fsynced_files2 | xargs -n1 git cat-file -e
"
test_expect_success \
'git add: Test that executable bit is not used if core.filemode=0' \
'git config core.filemode 0 &&

View File

@@ -9,6 +9,7 @@ GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
. $TEST_DIRECTORY/lib-unique-files.sh
test_expect_success 'usage on cmd and subcommand invalid option' '
test_expect_code 129 git stash --invalid-option 2>usage &&
@@ -1323,6 +1324,19 @@ test_expect_success 'stash handles skip-worktree entries nicely' '
git rev-parse --verify refs/stash:A.t
'
test_expect_success 'stash with core.fsyncobjectfiles=batch' "
test_create_unique_files 2 4 fsync-files &&
git -c core.fsyncobjectfiles=batch stash push -u -- ./fsync-files/ &&
rm -f fsynced_files &&
# The files were untracked, so use the third parent,
# which contains the untracked files
git ls-tree -r stash^3 -- ./fsync-files/ > fsynced_files &&
test_line_count = 8 fsynced_files &&
awk -- '{print \$3}' fsynced_files | xargs -n1 git cat-file -e
"
test_expect_success 'stash -c stash.useBuiltin=false warning ' '
expected="stash.useBuiltin support has been removed" &&

View File

@@ -162,23 +162,23 @@ test_expect_success 'pack-objects with bogus arguments' '
# Unpack the given packfile into a scratch bare repo and verify that the
# resulting objects match obj-list. "$1" is the pack basename; "$2" is
# optional extra git options (e.g. "-c core.fsyncobjectfiles=batch").
#
# Note: the diff rendering interleaved the pre-image (cmp of loose object
# files) with the post-image; only the post-image survives here.
check_unpack () {
	test_when_finished "rm -rf git2" &&
	git $2 init --bare git2 &&
	(
		git $2 -C git2 unpack-objects -n <"$1".pack &&
		git $2 -C git2 unpack-objects <"$1".pack &&
		git $2 -C git2 cat-file --batch-check="%(objectname)"
	) <obj-list >current &&
	cmp obj-list current
}
test_expect_success 'unpack without delta' '
check_unpack test-1-${packname_1}
'
test_expect_success 'unpack without delta (core.fsyncobjectfiles=batch)' '
check_unpack test-1-${packname_1} "-c core.fsyncobjectfiles=batch"
'
test_expect_success 'pack with REF_DELTA' '
packname_2=$(git pack-objects --progress test-2 <obj-list 2>stderr) &&
check_deltas stderr -gt 0
@@ -188,6 +188,10 @@ test_expect_success 'unpack with REF_DELTA' '
check_unpack test-2-${packname_2}
'
test_expect_success 'unpack with REF_DELTA (core.fsyncobjectfiles=batch)' '
check_unpack test-2-${packname_2} "-c core.fsyncobjectfiles=batch"
'
test_expect_success 'pack with OFS_DELTA' '
packname_3=$(git pack-objects --progress --delta-base-offset test-3 \
<obj-list 2>stderr) &&
@@ -198,6 +202,10 @@ test_expect_success 'unpack with OFS_DELTA' '
check_unpack test-3-${packname_3}
'
test_expect_success 'unpack with OFS_DELTA (core.fsyncobjectfiles=batch)' '
check_unpack test-3-${packname_3} "-c core.fsyncobjectfiles=batch"
'
test_expect_success 'compare delta flavors' '
perl -e '\''
defined($_ = -s $_) or die for @ARGV;

View File

@@ -546,6 +546,54 @@ int xmkstemp_mode(char *filename_template, int mode)
return fd;
}
/*
 * Sync the file referred to by fd according to the requested action:
 * FSYNC_WRITEOUT_ONLY pushes dirty pages to the storage device without
 * a hardware flush (best effort; fails with ENOSYS if no such
 * platform primitive exists), while FSYNC_HARDWARE_FLUSH requests full
 * durability. Returns the underlying syscall's result (0/-1).
 *
 * The #ifdef branches below each return, so at most one mechanism is
 * compiled in per platform; falling off the end of the ladder means no
 * writeout-only support.
 */
int git_fsync(int fd, enum fsync_action action)
{
	switch (action) {
	case FSYNC_WRITEOUT_ONLY:

#ifdef __APPLE__
		/*
		 * on macOS, fsync just causes filesystem cache writeback but does not
		 * flush hardware caches.
		 */
		return fsync(fd);
#endif

#ifdef HAVE_SYNC_FILE_RANGE
		/*
		 * On linux 2.6.17 and above, sync_file_range is the way to issue
		 * a writeback without a hardware flush. An offset of 0 and size of 0
		 * indicates writeout of the entire file and the wait flags ensure that all
		 * dirty data is written to the disk (potentially in a disk-side cache)
		 * before we continue.
		 */
		return sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WAIT_BEFORE |
						 SYNC_FILE_RANGE_WRITE |
						 SYNC_FILE_RANGE_WAIT_AFTER);
#endif

#ifdef fsync_no_flush
		/* Platform-provided writeout-only hook (e.g. Windows). */
		return fsync_no_flush(fd);
#endif

		/* No writeout-only mechanism available on this platform. */
		errno = ENOSYS;
		return -1;

	case FSYNC_HARDWARE_FLUSH:
#ifdef __APPLE__
		/* On macOS, F_FULLFSYNC is needed to flush the disk cache. */
		return fcntl(fd, F_FULLFSYNC);
#else
		return fsync(fd);
#endif

	default:
		BUG("unexpected git_fsync(%d) call", action);
	}
}
static int warn_if_unremovable(const char *op, const char *file, int rc)
{
int err;

View File

@@ -62,7 +62,7 @@ void fsync_or_die(int fd, const char *msg)
use_fsync = git_env_bool("GIT_TEST_FSYNC", 1);
if (!use_fsync)
return;
while (fsync(fd) < 0) {
while (git_fsync(fd, FSYNC_HARDWARE_FLUSH) < 0) {
if (errno != EINTR)
die_errno("fsync error on '%s'", msg);
}