[Mldonkey-commits] mldonkey distrib/ChangeLog src/networks/donkey/...


From: mldonkey-commits
Subject: [Mldonkey-commits] mldonkey distrib/ChangeLog src/networks/donkey/...
Date: Thu, 19 Apr 2007 13:44:16 +0000

CVSROOT:        /sources/mldonkey
Module name:    mldonkey
Changes by:     spiralvoice <spiralvoice>       07/04/19 13:44:16

Modified files:
        distrib        : ChangeLog 
        src/networks/donkey: donkeyFiles.ml donkeyOptions.ml 

Log message:
        patch #5857

CVSWeb URLs:
http://cvs.savannah.gnu.org/viewcvs/mldonkey/distrib/ChangeLog?cvsroot=mldonkey&r1=1.1236&r2=1.1237
http://cvs.savannah.gnu.org/viewcvs/mldonkey/src/networks/donkey/donkeyFiles.ml?cvsroot=mldonkey&r1=1.28&r2=1.29
http://cvs.savannah.gnu.org/viewcvs/mldonkey/src/networks/donkey/donkeyOptions.ml?cvsroot=mldonkey&r1=1.63&r2=1.64

Patches:
Index: distrib/ChangeLog
===================================================================
RCS file: /sources/mldonkey/mldonkey/distrib/ChangeLog,v
retrieving revision 1.1236
retrieving revision 1.1237
diff -u -b -r1.1236 -r1.1237
--- distrib/ChangeLog   19 Apr 2007 13:37:10 -0000      1.1236
+++ distrib/ChangeLog   19 Apr 2007 13:44:16 -0000      1.1237
@@ -15,6 +15,7 @@
 =========
 
 2007/04/19
+5857: EDK: Remove upload compression (patch #5665) due to ineffective caching
 5875: Swarmer: Some cosmetic changes (pango)
 5856: EDK: Set TCP backlog to max_upload_slots for donkey_port socket
 5869: EDK/Swarmer: Try harder to read corrupt files.ini

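A note on the "ineffective caching" wording above: the code removed below keeps
freshly compressed blocks in an OCaml Weak.t array, and the garbage collector is
free to clear weak entries at any time, so a block compressed for one request can
vanish before the follow-up request for the same range arrives. The following is a
minimal, self-contained sketch of that weak-array cache pattern (the names entry,
store, lookup and cache_size are illustrative, not mldonkey identifiers):

(* Sketch of a fixed-size weak cache keyed by (offset, length).
   Because the slots are weak, Weak.get may return None even for a
   block stored moments ago; the GC can reclaim any entry. *)
type entry = { e_offset : int64; e_length : int; e_data : string }

let cache_size = 20
let (cache : entry Weak.t) = Weak.create cache_size
let slot = ref 0

(* Store one block, overwriting slots round-robin. *)
let store off len data =
  slot := (!slot + 1) mod cache_size;
  Weak.set cache !slot (Some { e_offset = off; e_length = len; e_data = data })

(* Linear scan; None means either a genuine miss or an entry the GC
   already collected, and the two cases cannot be told apart here. *)
let lookup off len =
  let hit = ref None in
  for i = 0 to cache_size - 1 do
    match Weak.get cache i with
    | Some e when e.e_offset = off && e.e_length = len -> hit := Some e.e_data
    | _ -> ()
  done;
  !hit
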
Index: src/networks/donkey/donkeyFiles.ml
===================================================================
RCS file: /sources/mldonkey/mldonkey/src/networks/donkey/donkeyFiles.ml,v
retrieving revision 1.28
retrieving revision 1.29
diff -u -b -r1.28 -r1.29
--- src/networks/donkey/donkeyFiles.ml  19 Feb 2007 21:19:44 -0000      1.28
+++ src/networks/donkey/donkeyFiles.ml  19 Apr 2007 13:44:16 -0000      1.29
@@ -61,51 +61,6 @@
 where nseconds = Fifo.length upload_clients
   
   *)
-exception Cache_table_hit of string * string
-type cache_entry = {
-  md4 : Md4.t;
-  begin_offset : int64;
-  end_offset : int64;
-  cached_part : string;
-  comp_part : string
-  }
-let cache_table_index = ref 0
-let cache_table_size = !!upload_compression_table_size
-let ( cache_table : cache_entry Weak.t ) = Weak.create cache_table_size
-let cached_load file begin_offset end_offset compress=
-  try
-    for i = 0 to cache_table_size-1 do
-      match Weak.get cache_table i with
-          Some ({md4=md4;begin_offset=bo;end_offset=eo;cached_part=cached_file;comp_part=cached_comp}) when (md4 = file.file_md4) && (bo=begin_offset) && (eo=end_offset) ->
-          if !verbose_upload then
-            lprintf_nl "Cache Hit for %s (%Ld,%Ld)" (file_best_name file) begin_offset end_offset;
-          if (compress && (String.length cached_comp > 0)) || not compress then raise (Cache_table_hit(cached_file,cached_comp))
-        | _ -> ()
-    done;
-    let entry_length = Int64.to_int(end_offset -- begin_offset) in
-    let cached_file = String.create entry_length in
-    Unix32.read (file_fd file) begin_offset cached_file 0 entry_length;
-    let cached_comp = if compress then
-      Zlib.compress_string ~level:!!upload_compression_level cached_file
-    else
-      ""
-    in
-    cache_table_index := (!cache_table_index + 1) mod cache_table_size;
-    let (entry : cache_entry)={
-      md4=file.file_md4;
-      begin_offset=begin_offset;
-      end_offset=end_offset;
-      cached_part=cached_file;
-      comp_part=cached_comp} in
-      Weak.set cache_table !cache_table_index (Some entry);
-    if !verbose_upload then
-      lprintf_nl "Cache Miss for %s (%Ld,%Ld) orig.len %d comp.len %d" (file_best_name file) begin_offset end_offset (String.length cached_file) (String.length cached_comp);
-    Some (cached_file,cached_comp)
-  with
-    | Cache_table_hit (cached_file,cached_comp) -> Some (cached_file,cached_comp)
-    | End_of_file -> if !verbose then lprintf_nl
-              "End_of_file in cached_load file %s size %Ld begin %Ld end %Ld" (file_best_name file) (file_size file) begin_offset end_offset; None
-
   
 module NewUpload = struct
     
@@ -119,22 +74,25 @@
            M.CloseSlotReq Q.t)
 *)
     
-    let rec send_small_block_plain c sock file begin_offset cfile pos len_int sixtyfour =
+    let rec send_small_block c sock file begin_pos len_int = 
+(*      lprintf "send_small_block %d\n" len_int; *)
+(*      let len_int = Int32.to_int len in *)
       try
        if !verbose_upload then
-          lprintf_nl "Sending plain %s to %s, begin_offset %Ld pos %d len %d"
+         lprintf_nl "Sending %s to %s, begin %Ld len %d"
            (file_best_name file) (full_client_identifier c)
-          (begin_offset) (pos) (len_int);
+            (begin_pos) (len_int);
         
+        if file_is_largefile file && c.client_emule_proto.emule_largefiles <> 1 then raise Donkey_large_file;
         let msg =  
           (
             let module M = DonkeyProtoClient in
             let module B = M.Bloc in
             M.BlocReq {  
               B.md4 = file.file_md4;
-              B.usesixtyfour = sixtyfour;
-              B.start_pos = begin_offset ++ (Int64.of_int pos);
-              B.end_pos = begin_offset ++ (Int64.of_int(pos + len_int));
+              B.usesixtyfour = (begin_pos ++ (Int64.of_int len_int)) > old_max_emule_file_size;
+              B.start_pos = begin_pos;
+              B.end_pos = begin_pos ++ (Int64.of_int len_int);
               B.bloc_str = "";
               B.bloc_begin = 0;
               B.bloc_len = 0; 
@@ -145,7 +103,7 @@
         let upload_buffer = String.create (slen + len_int) in
         String.blit s 0 upload_buffer 0 slen;
         DonkeyProtoCom.new_string msg upload_buffer;
-        String.blit cfile pos upload_buffer slen len_int;
+        Unix32.read (file_fd file) begin_pos upload_buffer slen len_int;
         let uploaded = Int64.of_int len_int in
         count_upload c uploaded;
        CommonUploads.consume_bandwidth len_int;
@@ -158,91 +116,32 @@
         write_string sock upload_buffer;
         check_end_upload c sock
       with
-      | e -> if !verbose then lprintf_nl
-              "Exception %s in send_small_block_plain" (Printexc2.to_string e)
-    
-    let rec send_small_block_compressed c sock file begin_offset ccomp pos len_int pay_len sixtyfour =
-      try
-        if !verbose_upload then
-          lprintf_nl "Sending compressed %s to %s, begin_offset %Ld pos %d len %d"
+      | End_of_file -> lprintf_nl "Can not send file %s to %s, file removed?"
+                        (file_best_name file) (full_client_identifier c)
+      | Donkey_large_file -> lprintf_nl "File %s is too large for %s."
                         (file_best_name file) (full_client_identifier c)
-          (begin_offset) (pos) (len_int);
-        
-        let msg =  
-          (
-            let module M = DonkeyProtoClient in
-            let module B = M.EmuleCompressedPart in
-            M.EmuleCompressedPart {  
-              B.md4 = file.file_md4;
-              B.usesixtyfour = sixtyfour;
-              B.statpos = begin_offset;
-              B.newsize = Int64.of_int pay_len;
-              B.bloc = "";
-            }
-          ) in
-        let s = client_msg_to_string c.client_emule_proto msg in
-        let slen = String.length s in
-        let upload_buffer = String.create (slen + len_int) in
-        String.blit s 0 upload_buffer 0 slen;
-        DonkeyProtoCom.new_string msg upload_buffer;
-        String.blit ccomp pos upload_buffer slen len_int;
-        CommonUploads.consume_bandwidth len_int;
-        
-        write_string sock upload_buffer;
-        check_end_upload c sock
-      with
       | e -> if !verbose then lprintf_nl
-              "Exception %s in send_small_block_compressed" (Printexc2.to_string e)
+              "Exception %s in send_small_block" (Printexc2.to_string e)
     
     let rec send_client_block c sock per_client =
-      try
+(*      lprintf "send_client_block\n"; *)
       if per_client > 0 && CommonUploads.can_write_len sock max_msg_size then
         match c.client_upload with
-        | Some ({ up_chunks = (begin_offset,end_offset) :: chunks } as up)  ->
-            if file_is_largefile up.up_file && c.client_emule_proto.emule_largefiles <> 1 then begin
-              DonkeyOneFile.remove_client_slot c;
-              lprintf_nl "File %s is too large for %s." (file_best_name up.up_file) (full_client_identifier c);
-            end else
-            if up.up_file.file_shared = None then
+        | Some ({ up_chunks = current_chunk :: chunks } as up)  ->
+            if up.up_file.file_shared = None then begin
 (* Is there a message to warn that a file is not shared anymore ? *)
-              DonkeyOneFile.remove_client_slot c
-              else
-            let compress =
-              !!upload_compression &&
-              (c.client_emule_proto.emule_compression <> 0) &&
-              not (List.mem (String.lowercase (Filename2.last_extension2 (file_best_name up.up_file)))
-                  !!upload_compression_ext_exclude)
-            in
-            let cfile,ccomp = match cached_load up.up_file begin_offset end_offset compress with
-              Some (cached_file,cached_comp) -> cached_file,cached_comp
-              | _ -> "",""
-            in
-            let compressed = compress && ((String.length ccomp) + !!upload_compression_threshold < (String.length cfile)) in
-            let pay_len = if compressed then (String.length ccomp) else (String.length cfile) in
-            let pos = Int64.to_int (up.up_pos -- begin_offset) in
-            let max_len = pay_len - pos in
-            let allowed_msg_block_size_int = min msg_block_size_int per_client in
-            let sixtyfour = end_offset >= old_max_emule_file_size in
-            if max_len <= allowed_msg_block_size_int then
+                c.client_upload <- None;
+              end else
+            let max_len = up.up_end_chunk -- up.up_pos in
+            let max_len = Int64.to_int max_len in
+            let msg_block_size_int = min msg_block_size_int per_client in
+            if max_len <= msg_block_size_int then
 (* last block from chunk *)
               begin
-                if compressed then
-                  begin
-                    send_small_block_compressed c sock up.up_file begin_offset ccomp pos max_len pay_len sixtyfour;
-                    let uploaded = end_offset -- begin_offset in
-                    count_upload c uploaded;
-                    (match up.up_file.file_shared with None -> ()
-                      | Some impl ->
-                        shared_must_update_downloaded (as_shared impl);
-                        impl.impl_shared_uploaded <- 
-                        impl.impl_shared_uploaded ++ uploaded)
-                  end
-                else 
-                  send_small_block_plain c sock up.up_file begin_offset cfile pos max_len sixtyfour
-                ;
+                send_small_block c sock up.up_file up.up_pos max_len;
                 if !verbose_upload then
-                    lprintf_nl "End of chunk %Ld %Ld %s" begin_offset end_offset (file_best_name up.up_file);
-               up.up_flying_chunks <- up.up_flying_chunks @ [(begin_offset,end_offset)];
+                    lprintf_nl "End of chunk (%d) %Ld %s" max_len up.up_end_chunk (file_best_name up.up_file);
+               up.up_flying_chunks <- up.up_flying_chunks @ [current_chunk];
                 up.up_chunks <- chunks;
                 let per_client = per_client - max_len in
                 match chunks with
@@ -259,25 +158,14 @@
               end
             else
 (* small block from chunk *)
-              if allowed_msg_block_size_int >= msg_block_size_int then
-                begin
-                  if compressed then
               begin
-                      send_small_block_compressed c sock up.up_file begin_offset ccomp pos msg_block_size_int pay_len sixtyfour;
-                    end
-                  else
-                    begin
-                      send_small_block_plain c sock up.up_file begin_offset cfile pos msg_block_size_int sixtyfour;
-                    end
-                  ;
+                send_small_block c sock up.up_file up.up_pos 
+                  msg_block_size_int;
                 up.up_pos <- up.up_pos ++ (Int64.of_int msg_block_size_int);
                 let per_client = per_client-msg_block_size_int in
                 send_client_block c sock per_client
               end
         | _ -> ()
-      with
-      | e -> if !verbose then lprintf_nl
-              "Exception %s in send_client_block" (Printexc2.to_string e)
     
     let upload_to_client c size = 
 (*      lprintf "upload_to_client %d\n" size; *)

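After this change the send path in donkeyFiles.ml is a single function,
send_small_block, which renders the protocol header and then reads the requested
range of the shared file straight into the tail of the outgoing buffer (the new
Unix32.read call above); there is no cache or compression stage any more, at the
cost of one disk read per block sent. Below is a rough sketch of that
read-into-buffer shape using only the standard Unix library; read_block_into_buffer
and its arguments are illustrative, not mldonkey code, which goes through its
Unix32 wrapper instead:

(* Sketch: render a header, then read len bytes of the file at offset
   directly into the tail of the same buffer. *)
let read_block_into_buffer ~path ~offset ~len header =
  let hlen = String.length header in
  let buf = Bytes.create (hlen + len) in
  Bytes.blit_string header 0 buf 0 hlen;
  let fd = Unix.openfile path [ Unix.O_RDONLY ] 0 in
  ignore (Unix.LargeFile.lseek fd offset Unix.SEEK_SET);
  (* Unix.read may return short counts, so loop until the block is full. *)
  let rec fill pos remaining =
    if remaining > 0 then begin
      let n = Unix.read fd buf pos remaining in
      if n = 0 then (Unix.close fd; raise End_of_file);
      fill (pos + n) (remaining - n)
    end
  in
  fill hlen len;
  Unix.close fd;
  Bytes.to_string buf
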
Index: src/networks/donkey/donkeyOptions.ml
===================================================================
RCS file: /sources/mldonkey/mldonkey/src/networks/donkey/donkeyOptions.ml,v
retrieving revision 1.63
retrieving revision 1.64
diff -u -b -r1.63 -r1.64
--- src/networks/donkey/donkeyOptions.ml        8 Apr 2007 14:31:43 -0000       1.63
+++ src/networks/donkey/donkeyOptions.ml        19 Apr 2007 13:44:16 -0000      1.64
@@ -195,55 +195,6 @@
   "Uploaded zones (1 zone = 180 kBytes) needed to enable the dynamic upload lifetime"
     int_option 10
 
-let upload_compression = define_expert_option donkey_section ["upload_compression"]
-  "Enables compressed upload as part of the protocol"
-    bool_option true
-
-let upload_compression_threshold = define_expert_option donkey_section ["upload_compression_threshold"]
-  "Sizedifference in bytes between one zone (180 kBytes) and its compressed
-  counterpart, which has to occure, to send compressed parts instead of plain."
-    int_option 2000
-
-let upload_compression_ext_exclude = define_expert_option donkey_section ["upload_compression_ext_exclude"]
-  "Disable upload compression based on file extensions (without dot)"
-    string_list_option ["zip"; "7z"; "gz"; "bz2"; "rar"; "ace"; "ogm"; "avi"; "mpg"]
-
-let _ =
-  option_hook upload_compression_threshold (fun _ ->
-    if !!upload_compression_threshold < 0 then
-        upload_compression_threshold =:= 0
-  );
-  option_hook upload_compression_ext_exclude (fun _ ->
-    let l = List.map String.lowercase !!upload_compression_ext_exclude in
-    if !!upload_compression_ext_exclude <> l then upload_compression_ext_exclude =:= l
-  )
-
-let upload_compression_level = define_expert_option donkey_section ["upload_compression_level"]
-  "Level of the used zlibcompression. allowed are values between 0 and 9. higher
-  level means better compression, but higher cpu usage too. (emules default
-  compression level for compressed parts is 9)"
-    int_option 9
-
-let _ =
-  option_hook upload_compression_level (fun _ ->
-    if !!upload_compression_level < 0
-      || !!upload_compression_level > 9 then
-        upload_compression_level =:= 9
-  )
-
-let upload_compression_table_size = define_expert_option donkey_section ["upload_compression_table_size"]
-  ~restart: true
-  "Size of the cache table in entries (ca. 2 * 180 kbytes). zones have to be
-  compressed at once, but only parts of it are sent at a time (10 kbytes).
-  Minimum value is the number of total upload slots."
-    int_option 20
-
-let _ =
-  option_hook upload_compression_table_size (fun _ ->
-    if !!upload_compression_table_size < !!max_upload_slots then
-        upload_compression_table_size =:= !!max_upload_slots
-  )
-
 let connected_server_timeout = define_expert_option donkey_section ["connected_server_timeout"]
   "How long can a silent server stay connected"
     float_option 1800.
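
The removed options above also illustrate mldonkey's usual clamping idiom: an
option_hook that rewrites out-of-range values right after they are set (an
out-of-range compression level is reset to 9, and a table size below
max_upload_slots is raised to it). Here is a generic sketch of that hook-and-clamp
pattern, independent of mldonkey's Options module; opt, create, get, set and
on_change below are illustrative stand-ins:

(* A settable value with change hooks that can clamp bad input. *)
type 'a opt = {
  mutable value : 'a;
  mutable hooks : (unit -> unit) list;
}

let create v = { value = v; hooks = [] }
let get o = o.value
let set o v = o.value <- v; List.iter (fun h -> h ()) o.hooks
let on_change o h = o.hooks <- h :: o.hooks

let compression_level = create 9

(* Mirrors the removed hook: out-of-range levels fall back to 9.
   Re-setting runs the hook once more, but 9 passes the check, so it stops. *)
let () =
  on_change compression_level (fun () ->
    if get compression_level < 0 || get compression_level > 9 then
      set compression_level 9)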